112 files changed, 3637 insertions, 5790 deletions
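The bulk of the additions below is the new client/geocode autocomplete component. Stripped of the React wiring, its matching strategy is: normalize every node name and the query with the same sanitizer, build an index of [sanitized name, display name, id] triples, test each entry against a regex built from the sanitized query, and rank prefix matches ahead of plain substring matches before truncating to ten results. A minimal standalone sketch of that idea follows; the sample nodes and plain-function shape are invented for illustration and are not taken from the diff.

// Illustrative sketch (not part of the diff) of the matching strategy used by
// the new autocomplete component: normalize names and query the same way,
// then rank prefix matches ahead of plain substring matches.
function sanitize(s) {
  return (s || '').toLowerCase().replace(/[^a-z0-9 ]/g, '').trim()
}

// one index entry per node: [sanitized name, display name, id]
function buildIndex(nodes) {
  return Object.keys(nodes).map(k => [sanitize(nodes[k].name), nodes[k].name, nodes[k].id])
}

function match(index, q, limit = 10) {
  const value = sanitize(q)
  if (!value) return []
  const re = new RegExp(value)
  const matches = []
  index.forEach(([key, name, id]) => {
    if (!key.match(re)) return
    if (key.indexOf(value) === 0) matches.unshift(id) // prefix match: front of the list
    else matches.push(id)                             // substring match: back of the list
  })
  return matches.slice(0, limit)
}

// hypothetical usage with invented sample nodes
const nodes = {
  1: { id: 1, name: 'Aleppo' },
  2: { id: 2, name: 'Al-Hasakah' },
  3: { id: 3, name: 'Damascus' },
}
console.log(match(buildIndex(nodes), 'al')) // -> [ 2, 1 ]

Run under Node this prints [ 2, 1 ]: both names pass the regex test and both are prefix matches, so the later one is unshifted to the front, mirroring the component's unshift/push ordering.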
diff --git a/client/geocode/autocomplete.component.js b/client/geocode/autocomplete.component.js
new file mode 100644
index 00000000..12419cf1
--- /dev/null
+++ b/client/geocode/autocomplete.component.js
@@ -0,0 +1,208 @@
+import React, { Component } from 'react'
+// import PropTypes from 'prop-types'
+import { bindActionCreators } from 'redux'
+import { connect } from 'react-redux'
+
+function formatLabel(label, value) {
+  if (!value) {
+    return label
+  }
+  let len = 0
+  return (
+    <span>
+      {
+        label.split(new RegExp(value.replace(/[-\[\]\(\)\+\*\\\^\$\{\}\.\?\&\|\<\>]/g, ''), "i")) // eslint-disable-line
+          .reduce((prev, current, i) => {
+            if (!i) {
+              len += current.length
+              return [current]
+            }
+            const ret = prev.concat(<b key={i}>{label.substr(len, value.length)}</b>, current)
+            len += value.length + current.length
+            return ret
+          }, [])
+      }
+    </span>
+  )
+}
+
+function sanitizeForAutocomplete(s) {
+  return (s || "")
+    .toLowerCase()
+    .replace(/[^a-zA-Z0-9 ]/g, '')
+    .trim()
+    .replace(/\\/g, '')
+}
+
+class Autocomplete extends Component {
+  constructor(props) {
+    super()
+    this.state = {
+      q: props.q || "",
+      selected: 0,
+      matches: []
+    }
+    this.handleKeyDown = this.handleKeyDown.bind(this)
+    this.handleChange = this.handleChange.bind(this)
+    this.handleCancel = this.handleCancel.bind(this)
+  }
+
+  componentWillMount() {
+    // build index based on what's in the hierarchy
+    const { nodes } = this.props.hierarchy
+    let index = []
+    this.index = index
+    Object.keys(nodes).forEach(key => {
+      const node = nodes[key]
+      if (!key || !node || !node.name || !node.parent) return
+      let { name } = node
+      let prefixName = name
+      if (node.is_attribute) {
+        const parent = nodes[node.parent]
+        if (parent) {
+          prefixName = parent.name + " (" + name + ")"
+        }
+      }
+      index.push([sanitizeForAutocomplete(prefixName), name, node.id])
+      node.synonyms
+        .split("\n")
+        .map(word => word = word.trim())
+        .filter(word => !!word)
+        .forEach(word => index.push([prefixName, name, node.id]))
+    })
+  }
+
+  handleKeyDown(e) {
+    let id
+    switch (e.keyCode) {
+      case 27: // escape
+        e.preventDefault()
+        this.handleCancel()
+        break
+      case 37: // left
+      case 38: // up
+        e.preventDefault()
+        this.setState({
+          selected: (this.state.matches.length + this.state.selected - 1) % this.state.matches.length
+        })
+        return false
+      case 39: // right
+      case 40: // down
+        e.preventDefault()
+        this.setState({
+          selected: (this.state.selected + 1) % this.state.matches.length
+        })
+        return false
+      case 13: // enter
+        id = this.state.matches[this.state.selected]
+        e.preventDefault()
+        this.handleSelect(id)
+        return false
+      default:
+        break
+    }
+  }
+
+  handleChange(e) {
+    // search for the given string in our index
+    const q = e.target.value
+    let value = sanitizeForAutocomplete(q)
+    if (!value.length) {
+      this.setState({
+        q,
+        selected: 0,
+        matches: [],
+      })
+      return
+    }
+    const re = new RegExp(value)
+    let matches = []
+    let seen = {}
+    this.index.forEach(pair => {
+      if (seen[pair[2]]) return
+      if (pair[0].match(re)) {
+        seen[pair[2]] = true
+        if (pair[1].indexOf(value) === 0) {
+          matches.unshift(pair[2])
+        } else {
+          matches.push(pair[2])
+        }
+      }
+    })
+    this.setState({
+      q,
+      selected: 0,
+      matches: matches.slice(0, 10),
+    })
+  }
+
+  handleSelect(id) {
+    const { nodes } = this.props.hierarchy
+    const node = nodes[id]
+    if (this.props.onSelect) this.props.onSelect(node)
+    this.setState({ q: "", selected: 0, matches: [] })
+  }
+
+  handleCancel() {
+    if (this.props.onCancel) this.props.onCancel()
+    this.setState({ q: "", selected: 0, matches: [] })
+  }
+
+  render() {
+    // const suggestions = this.state.suggestions.map((suggestion))
+    const { nodes } = this.props.hierarchy
+    const { q, selected } = this.state
+    const matches = this.state.matches.map((match, i) => {
+      const node = nodes[match]
+      const parent = nodes[node.parent]
+      let label
+      if (node.is_attribute) {
+        label = (
+          <span>
+            {formatLabel(parent.name, q)}
+            {' '}<small>{'('}{formatLabel(node.name, q)}{')'}</small>
+          </span>
+        )
+      } else {
+        label = formatLabel(node.name, q)
+      }
+      return (
+        <div
+          key={i}
+          className={selected === i ? 'selected' : ''}
+          onClick={() => this.handleSelect(node.id)}
+          onMouseEnter={() => this.setState({ selected: i })}
+        >
+          {label}
+        </div>
+      )
+    })
+    return (
+      <div className="autocomplete">
+        <input
+          type="text"
+          name="q"
+          value={this.state.q}
+          onKeyDown={this.handleKeyDown}
+          onChange={this.handleChange}
+          autoFocus
+          autoCapitalize="off"
+          autoComplete="off"
+          placeholder="Start typing a name"
+        />
+        <div className="matches">
+          {matches}
+        </div>
+      </div>
+    )
+  }
+}
+
+const mapStateToProps = (state, ownProps) => ({
+  onSelect: ownProps.onSelect,
+})
+
+const mapDispatchToProps = (dispatch) => ({
+})
+
+export default connect(mapStateToProps, mapDispatchToProps)(Autocomplete)
diff --git a/client/geocode/geocode.component.js b/client/geocode/geocode.component.js
new file mode 100644
index 00000000..e57d238d
--- /dev/null
+++ b/client/geocode/geocode.component.js
@@ -0,0 +1,25 @@
+import React, { Component } from 'react'
+
+import Autocomplete from './autocomplete.component'
+
+class GeocodeContainer extends Component {
+  // constructor() {
+  //   super()
+  // }
+  render() {
+    // const { } = this.props
+    return (
+      <div className=''>
+        <select>
+          {}
+        </select>
+        <div>
+          <Autocomplete />
+        </div>
+      </div>
+    )
+  }
+}
+
+
+export default GeocodeContainer
diff --git a/client/geocode/index.js b/client/geocode/index.js
new file mode 100644
index 00000000..4f0da3f6
--- /dev/null
+++ b/client/geocode/index.js
@@ -0,0 +1,10 @@
+import React from 'react'
+import ReactDOM from 'react-dom'
+
+// import { toArray } from '../util'
+
+import GeocodeContainer from './autocomplete.component'
+
+ReactDOM.render(
+  <GeocodeContainer />, document.querySelector('#container')
+)
diff --git a/old/faiss/requirements.txt b/old/faiss/requirements.txt
deleted file mode 100644
index 1d60aabc..00000000
--- a/old/faiss/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Pillow
-h5py
-tensorflow
-Keras
-Flask
-opencv-python
-imagehash
-scikit-image
-scikit-learn
-imutils
-
diff --git a/old/faiss/run.sh b/old/faiss/run.sh
deleted file mode 100644
index 8f9e77e2..00000000
--- a/old/faiss/run.sh
+++ /dev/null
@@ -1 +0,0 @@
-uwsgi --http 127.0.0.1:5000 --file wsgi.py --callable app --processes 1
diff --git a/old/faiss/static/css/app.css b/old/faiss/static/css/app.css
deleted file mode 100644
index a3b24736..00000000
--- a/old/faiss/static/css/app.css
+++ /dev/null
@@ -1,289 +0,0 @@
-/* css boilerplate */
-
-* { box-sizing: border-box; }
-html,body {
-  margin: 0; padding: 0;
-  width: 100%; height: 100%;
-}
-body {
-  font-family: Helvetica, sans-serif;
-  font-weight: 300;
-  padding-top: 60px;
-}
-
-/* header */
-
-header {
-  position: fixed;
-  top: 0;
-  left: 0;
-  height: 60px;
-  width: 100%;
-  background: #11f;
-  color: white;
-  align-items: stretch;
-  display: flex;
-  flex-wrap: wrap;
-  justify-content: space-between;
-  z-index: 3;
-}
-header > section {
-  justify-content: flex-start;
-  align-items: center;
-  display: flex;
-  flex: 1 0;
-  font-weight: bold;
-} -header > section:last-of-type { - justify-content: flex-end; -} -header a { - color: hsla(0,0%,100%,.89); - text-decoration: none; - line-height: 18px; - font-size: 14px; - font-weight: 700; - padding: .35rem .4rem; - white-space: nowrap; -} -header .logged-in { - font-size: 12px; - font-weight: normal; - padding: 0 0.5rem; -} -header .logout { - padding: 0 6px; - border-left: 1px solid #99f; -} -header .logout a { - font-size: 12px; -} -.menuToggle { - width: 30px; - height: 30px; - margin: 5px; - cursor: pointer; - line-height: 1; -} - -/* form at the top */ - -#form { - display: flex; - flex-direction: row; - justify-content: space-between; - align-items: center; - margin: 20px; - padding: 20px; - border: 1px solid #ddd; -} -input[type=text] { - border: 1px solid #888; - padding: 4px; - font-size: 15px; -} -input[type=file] { - max-width: 200px; - border-radius: 2px; -} -input[type=file]:invalid + button { visibility: hidden!important; } -input[type=file]:valid + button { visibility: visible!important; } -#form > div { - display: flex; - flex-direction: row; - align-items: center; -} -#form > div * { - margin: 0 3px; -} - -/* saving UI form */ - -label { - display: block; - white-space: nowrap; - padding-bottom: 10px; -} -label:last-child { - padding-bottom: 0; -} -label span { - display: inline-block; - min-width: 80px; -} -.saving_ui { - display: none; -} -.saving .saving_ui { - display: flex; - border: 1px solid #ddd; - margin: 20px; - padding: 20px; - flex-direction: row; - justify-content: space-between; -} - -/* query box, shows either searched image, directory name, etc */ - -.loading .results, -.prefetch .query, .prefetch .results, -.browsing .score, .browsing .browse, -.photo .browse, -.saving .score { - display: none; -} -.browsing .query div { display: inline; margin-left: 5px; font-weight: bold; } -.saving .query div { display: inline; margin-left: 5px; font-weight: bold; } -.load_message { - opacity: 0; -} -.loading .load_message { - display: block; - margin: 20px; - font-weight: bold; -} - -.query { - margin: 20px; -} -.query > div { - margin-top: 10px; - position: relative; - display: flex; - flex-direction: row; - align-items: flex-start; -} -.query img { - cursor: crosshair; - max-width: 400px; - display: block; -} -.query > div > .box { - position: absolute; - border: 1px solid #11f; - background: rgba(17,17,255,0.1); - pointer-events: none; -} -.query canvas { - margin-left: 20px; - max-width: 200px; -} - -/* search results */ - -.results { - display: flex; - flex-direction: row; - flex-wrap: wrap; -} -.results > div { - display: flex; - flex-direction: column; - justify-content: flex-end; - width: 210px; - margin: 15px; - padding: 5px; - border: 1px solid transparent; -} -.results > div.saved { - border-radius: 2px; - background: #fafaaa; -} -.results > div img { - cursor: pointer; - max-width: 210px; - margin-bottom: 10px; -} -.results > div > div { - display: flex; - flex-direction: row; - justify-content: space-between; - align-items: center; -} -.results a:visited .btn { - color: #99d; -} -.score { - font-size: 12px; - color: #444; -} - - -/* spinner */ - -.loader { - display: flex; - align-items: center; - justify-content: center; - position: absolute; - top: 0; left: 0; - width: 100%; height: 100%; - background: rgba(255,255,255,0.9); -} -.loader > div { - background: white; - padding: 20px; - box-shadow: 0 1px 2px #bbb; - border-radius: 2px; -} -.spinner { - position: relative; - width: 32px; - height: 32px; - color: #11f; - margin: 0 auto; -} 
-.spinner:after { - position: absolute; - margin: auto; - width: 100%; - height: 100%; - top: 0; - left: 0; - right: 0; - bottom: 0; - content: " "; - display: inline-block; - border-radius: 50%; - border-style: solid; - border-width: 0.15em; - -webkit-background-clip: padding-box; - border-color: currentColor currentColor currentColor transparent; - box-sizing: border-box; - -webkit-animation: ld-cycle 0.7s infinite linear; - animation: ld-cycle 0.7s infinite linear; -} -@-webkit-keyframes ld-cycle { - 0%, 50%, 100% { - animation-timing-function: cubic-bezier(0.5, 0.5, 0.5, 0.5); - } - 0% { - -webkit-transform: rotate(0); - transform: rotate(0); - } - 50% { - -webkit-transform: rotate(180deg); - transform: rotate(180deg); - } - 100% { - -webkit-transform: rotate(360deg); - transform: rotate(360deg); - } -} -@keyframes ld-cycle { - 0%, 50%, 100% { - animation-timing-function: cubic-bezier(0.5, 0.5, 0.5, 0.5); - } - 0% { - -webkit-transform: rotate(0); - transform: rotate(0); - } - 50% { - -webkit-transform: rotate(180deg); - transform: rotate(180deg); - } - 100% { - -webkit-transform: rotate(360deg); - transform: rotate(360deg); - } -} diff --git a/old/faiss/static/favicon.ico b/old/faiss/static/favicon.ico Binary files differdeleted file mode 100644 index d97f2f59..00000000 --- a/old/faiss/static/favicon.ico +++ /dev/null diff --git a/old/faiss/static/img/play.png b/old/faiss/static/img/play.png Binary files differdeleted file mode 100644 index 40f76045..00000000 --- a/old/faiss/static/img/play.png +++ /dev/null diff --git a/old/faiss/static/index.html b/old/faiss/static/index.html deleted file mode 100644 index cf59c628..00000000 --- a/old/faiss/static/index.html +++ /dev/null @@ -1,83 +0,0 @@ -<!doctype html> -<html> -<head> -<meta charset="UTF-8" /> -<base href='/search/' /> -<link rel="stylesheet" href="static/css/app.css"> -<link rel="shortcut icon" href="static/favicon.ico" /> -<title>VFrame Image Import</title> -</head> -<body class='prefetch'> - -<header> - <section class="navbar-section"> - <a href="/"><img class="menuToggle" alt='logo' src="static/css/vframe-logo.png" /></a> - <a href="/categories/">Categories</a> - <a href="/groups/user/">Assignments</a> - <a href="/images/new/">Add Image</a> - <a href="/search/">Search</a> - </section> - - <section class="navbar-section last-navbar-section"> - <span class="menu-help"><a href="/static/explore/treemap.html">Explore Data</a></span> - <span class="menu-help"><a href="/help/">Help</a></span> - <span class="login-out logged-in"><span class="capitalize"></span></span> - <span class="logout login-out"><a href="/accounts/logout/">Logout</a></span> - </section> -</header> - -<div class="container"> - -<div id="form"> - <div> - <input type="file" name="img" accept="image/*" required> - <button class='btn upload_again'>Upload Again</button> - </div> - <div class="general_ui"> - <button class='btn panic'>Panic</button> - <button class='btn random'>Random</button> - <button class='btn view_saved'>View Saved</button> - </div> -</div> - -<div class="saving_ui"> - <div> - <label><span>Title</span> <input type="text" name="title" placeholder="Enter a title"></label> - <label><span></span><input type="checkbox" name="graphic"> Graphic content</label> - </label> - <label><span></span> - <button class='btn create_new_group'>Create New Group</button> - <button class='btn check'>Check Duplicates</button> - <button class='btn reset'>Clear Selection</button> - </label> - </div> -</div> - -<div class="query"> - <span class='msg'></span> - <div></div> 
-</div> - -<div class="results"> -</div> - - -</div> -<script type="text/html" id="result-template"> - <div class='{className}'> - <img src="{img}" crossorigin="anonymous"> - <div> - <div class='score'>{score}</div> - <a href='{metadata}'><button class='btn metadata'>Info</button></a> - <a href='{browse}'><button class='btn browse'>Expand</button></a> - <a href='{search}'><button class='btn search'>Search</button></a> - </div> - </div> -</script> -</body> -<script src="static/js/store2.min.js"></script> -<script src="static/js/dataUriToBlob.js"></script> -<script src="static/js/app.js"></script> -</html> - - diff --git a/old/faiss/static/js/app.js b/old/faiss/static/js/app.js deleted file mode 100644 index 77164c76..00000000 --- a/old/faiss/static/js/app.js +++ /dev/null @@ -1,491 +0,0 @@ -/* eslint no-use-before-define: 0, camelcase: 0, one-var-declaration-per-line: 0, one-var: 0, quotes: 0, prefer-destructuring: 0, no-alert: 0, no-console: 0, no-multi-assign: 0 */ - -function loadApp() { - const result_template = document.querySelector('#result-template').innerHTML - const results_el = document.querySelector('.results') - const query_div = document.body.querySelector('.query > div') - let bounds - let token, username - let x, y, mouse_x, mouse_y, dx, dy, box - let dragging = false - let cropping = false - let creating = false - let did_check = false - - function init() { - login() - bind() - route() - } - function bind() { - window.onpopstate = route - document.querySelector('[name=img]').addEventListener('change', upload) - on('click', '.results a', preventDefault) - on('click', '.search', search) - on('click', '.panic', panic) - on('click', '.upload_again', upload_again) - on('click', '.browse', browse) - on('click', '.results img', save) - on('click', '.view_saved', loadSaved) - on('click', '.create_new_group', createNewGroup) - on('click', '.reset', reset) - on('click', '.random', random) - on('click', '.check', check) - on('mousedown', '.query img', down) - window.addEventListener('mousemove', move) - window.addEventListener('mouseup', up) - window.addEventListener('keydown', keydown) - } - function route() { - const path = window.location.pathname.split('/') - // remove initial slash - path.shift() - // remove dummy route - if (path[0] === 'search') path.shift() - switch (path[0]) { - case 'fetch': - search({ target: { url: window.location.search.substr(1).split('=')[1] } }) - break - case 'view': - search(path.slice(1)) - break - case 'q': - if (path.length === 3) { - search({ target: { dir: path[1], fn: path[2] } }) - } else { - browse({ target: { dir: path[1], fn: null } }) - } - break - case 'saved': - loadSaved() - break - default: - break - } - } - function keydown(e) { - switch (e.keyCode) { - case 27: // escape - panic() - break - default: - break - } - } - - // load search results - function loadResults(data) { - console.log(data) - if (!data.query.url) return - // console.log(data) - document.body.className = 'searching' - const path = getPathFromImage(data.query.url) - pushState('searching', "/search/fetch/?url=" + path.url) - if (path.dir === 'uploaded' && path.fn.match('_filename')) { - loadMessage( - "<a href='javascript:history.go(-1)'>< Back</a> | " - + "Searching subregion, " - + "found " + data.results.length + " images" - ) - } else { - loadMessage( - "Found " + data.results.length + " images" - ) - } - loadQuery(data.query.url) - if (!data.results.length) { - results_el.innerHTML = "No results" - return - } - const saved = window.store.get('saved', []) - - 
results_el.innerHTML = data.results.map(res => { - const { distance, file, hash, frame, url } = res - const isSaved = saved.indexOf(url) !== -1 - const { type } = getPathFromImage(url) - let className = isSaved ? 'saved' : '' - className += ' ' + type - let t = result_template - .replace('{score}', Math.floor(clamp(1 - distance, 0, 1) * 100) + "%") - .replace('{browse}', '/search/q/' + hash) - .replace('{search}', '/search/view/' + [file, hash, frame].join('/')) - .replace('{metadata}', '/metadata/' + hash) - .replace('{className}', className) - .replace('{saved_msg}', isSaved ? 'Saved' : 'Save') - .replace('{img}', url) - return t - }).join('') - } - - function loadDirectory(data) { - console.log(data) - document.body.className = 'browsing' - pushState('searching', "/search/q/" + data.path) - loadMessage("Video: <b>" + data.path + "</b>") - loadQuery("") - if (!data.results.length) { - results_el.innerHTML = "No frames found" - return - } - const saved = window.store.get('saved', []) - results_el.innerHTML = data.results - .map(result => [parseInt(result.frame, 10), result]) - .sort((a, b) => a[0] - b[0]) - .map(pair => { - let { file, hash, frame, url } = pair[1] - const isSaved = saved.indexOf(url) !== -1 - let className = isSaved ? 'saved' : '' - let t = result_template - .replace('{img}', url) - .replace('{browse}', '/search/q/' + hash) - .replace('{search}', '/search/view/' + [file, hash, frame].join('/')) - .replace('{metadata}', '/metadata/' + hash) - .replace('{className}', className) - .replace('{saved_msg}', isSaved ? 'Saved' : 'Save') - return t - }).join('') - } - function loadSaved() { - document.body.className = 'saving' - pushState('View saved', "/search/saved") - const saved = window.store.get('saved', []) - cropping = false - loadMessage(saved.length + " saved image" + (saved.length === 1 ? "" : "s")) - loadQuery('') - const box_el = document.querySelector('.box') - if (box_el) box_el.parentNode.removeChild(box_el) - results_el.innerHTML = saved.map(href => { - const { url, dir } = getPathFromImage({ src: href }) - let className = 'saved' - let t = result_template - .replace('{img}', href) - .replace('{browse}', '/search/q/' + dir) - .replace('{search}', '/search/fetch/?url=' + url) - .replace('{metadata}', '/metadata/' + dir) - .replace('{className}', className) - .replace('{saved_msg}', 'Saved') - return t - }).join('') - } - function loadQuery(path) { - if (cropping) return - const qd = document.querySelector('.query div') - qd.innerHTML = '' - if (path.match(/(gif|jpe?g|png)$/)) { - const img = new Image() - img.setAttribute('crossorigin', 'anonymous') - img.src = path.replace('sm', 'md') - qd.appendChild(img) - } else { - qd.innerHTML = path || "" - } - } - function loadMessage(msg) { - document.querySelector('.query .msg').innerHTML = msg - } - - // panic button - function panic() { - loadMessage('Query cleared') - loadQuery('') - results_el.innerHTML = '' - } - - // adding stuff to localstorage - function save(e) { - const { url } = getPathFromImage(e.target) - const saved = window.store.get('saved', []) - let newList = saved || [] - if (saved.indexOf(url) !== -1) { - newList = saved.filter(f => f !== url) - e.target.parentNode.classList.remove('saved') - } else { - newList.push(url) - e.target.parentNode.classList.add('saved') - } - window.store.set('saved', newList) - } - function reset() { - const shouldReset = window.confirm("This will reset the saved images. 
Are you sure?") - if (!shouldReset) return - window.store.set('saved', []) - loadSaved() - document.querySelector('[name=title]').value = '' - window.alert("Reset saved images") - } - - // submit the new group - function createNewGroup() { - const title = document.querySelector('[name=title]').value.trim().replace(/[^-_a-zA-Z0-9 ]/g, "") - const saved = window.store.get('saved', []) - const graphic = document.querySelector('[name=graphic]').checked - if (!title.length) return alert("Please enter a title for this group") - if (!saved.length) return alert("Please pick some images to save") - if (!did_check) { - alert('Automatically checking for duplicates. Please doublecheck your selection.') - return check() - } - if (creating) return null - creating = true - return http_post("/api/images/import/new/", { - title, - graphic, - saved - }).then(res => { - console.log(res) - window.store.set('saved', []) - window.location.href = '/groups/show/' + res.image_group.id - }).catch(res => { - alert('Error creating group. The server response is logged to the console.') - console.log(res) - creating = false - }) - } - - // api queries - function login() { - const isLocal = (window.location.hostname === '0.0.0.0') - try { - // csrftoken = "test" // getCookie('csrftoken') - const auth = JSON.parse(window.store.get('persist:root').auth) - token = auth.token - username = auth.user.username - if (!token && !isLocal) { - window.location.href = '/' - } - } catch (e) { - if (!isLocal) { - window.location.href = '/' - } - } - document.querySelector('.logged-in .capitalize').innerHTML = username || 'user' - } - - function upload(e) { - cropping = false - const files = e.dataTransfer ? e.dataTransfer.files : e.target.files - let i, f - for (i = 0, f; i < files.length; i++) { - f = files[i] - if (f && f.type.match('image.*')) break - } - if (!f) return - do_upload(f) - } - - function do_upload(f) { - const fd = new FormData() - fd.append('query_img', f) - document.body.className = 'loading' - http_post('/search/api/upload', fd).then(loadResults) - } - - function upload_again() { - const { files } = document.querySelector('input[type=file]') - if (!files.length) { - window.alert('Please upload a file.') - return - } - upload({ - dataTransfer: { files } - }) - } - - function search(e) { - if (e.length) return search_by_vector(e) - const { url } = getPath(e.target) - cropping = false - document.body.className = 'loading' - loadQuery(url) - loadMessage('Loading results...') - http_get('/search/api/fetch/?url=' + url).then(loadResults) - } - - function search_by_vector(e) { - cropping = false - document.body.className = 'loading' - loadQuery('') - loadMessage('Loading results...') - http_get('/search/api/search/' + e.join('/')).then(loadResults) - } - - function browse(e) { - document.body.className = 'loading' - cropping = false - let dir; - if (e.target.dir) { - dir = e.target.dir - } - else { - const href = e.target.parentNode.href - dir = href.split('/')[5] - console.log(href, dir) - } - loadMessage('Listing video...') - http_get('/search/api/list/' + dir).then(loadDirectory) - } - - function check() { - http_post('/api/images/import/search/', { - saved: window.store.get('saved') || [], - }).then(res => { - console.log(res) - const { good, bad } = res - did_check = true - window.store.set('saved', good) - if (!bad.length) { - return alert("No duplicates found.") - } - bad.forEach(path => { - const el = document.querySelector('img[src="' + path + '"]') - if (el) el.parentNode.classList.remove('saved') - }) - return 
alert("Untagged " + bad.length + " duplicate" + (bad.length === 1 ? "" : "s") + ".") - }) - } - - function random() { - http_get('/search/api/random').then(loadResults) - } - - // drawing a box - function down(e) { - e.preventDefault() - dragging = true - bounds = query_div.querySelector('img').getBoundingClientRect() - mouse_x = e.pageX - mouse_y = e.pageY - x = mouse_x - bounds.left - y = mouse_y - bounds.top - dx = dy = 0 - box = document.querySelector('.box') || document.createElement('div') - box.className = 'box' - box.style.left = x + 'px' - box.style.top = y + 'px' - box.style.width = 0 + 'px' - box.style.height = 0 + 'px' - query_div.appendChild(box) - } - function move(e) { - if (!dragging) return - e.preventDefault() - dx = clamp(e.pageX - mouse_x, 0, bounds.width - x) - dy = clamp(e.pageY - mouse_y, 0, bounds.height - y) - box.style.width = dx + 'px' - box.style.height = dy + 'px' - } - function up(e) { - if (!dragging) return - dragging = false - e.preventDefault() - const img = query_div.querySelector('img') - const canvas = query_div.querySelector('canvas') || document.createElement('canvas') - const ctx = canvas.getContext('2d') - const ratio = img.naturalWidth / bounds.width - canvas.width = dx * ratio - canvas.height = dy * ratio - if (dx < 10 || dy < 10) { - if (canvas.parentNode) canvas.parentNode.removeChild(canvas) - const box_el = document.querySelector('.box') - if (box_el) box_el.parentNode.removeChild(box_el) - return - } - query_div.appendChild(canvas) - ctx.drawImage( - img, - x * ratio, - y * ratio, - dx * ratio, - dy * ratio, - 0, 0, canvas.width, canvas.height - ) - cropping = true - const blob = window.dataUriToBlob(canvas.toDataURL('image/jpeg', 0.9)) - do_upload(blob) - } - - // utility functions - function http_get(url) { - return fetch(url).then(res => res.json()) - } - function http_post(url, data) { - let headers - if (data instanceof FormData) { - headers = { - Accept: 'application/json, application/xml, text/play, text/html, *.*', - Authorization: 'Token ' + token, - } - } else { - headers = { - Accept: 'application/json, application/xml, text/play, text/html, *.*', - 'Content-Type': 'application/json; charset=utf-8', - Authorization: 'Token ' + token, - } - data = JSON.stringify(data) - } - - // headers['X-CSRFToken'] = csrftoken - return fetch(url, { - method: 'POST', - body: data, - credentials: 'include', - headers, - }).then(res => res.json()) - } - function on(evt, sel, handler) { - document.addEventListener(evt, function (event) { - let t = event.target - while (t && t !== this) { - if (t.matches(sel)) { - handler.call(t, event) - } - t = t.parentNode - } - }) - } - function getPathFromImage(el) { - const url = el.src ? 
el.src : el - const partz = url.split('/') - let type, dir, fn - if (partz.length === 3) { - type = 'photo' - dir = '' - fn = '' - } - if (partz.length === 9) { - type = 'photo' - dir = partz[6] - fn = '' - } else if (partz.length === 10) { - type = 'video' - dir = partz[6] - fn = partz[7] - } - return { type, dir, fn, url } - } - function getPath(el) { - if (el.url) { - return getPathFromImage(el.url) - } if (el.dir) { - return el - } - el = el.parentNode.parentNode.parentNode.querySelector('img') - return getPathFromImage(el) - } - function pushState(txt, path) { - if (window.location.pathname === path) return - console.log('pushstate', path) - window.history.pushState({}, txt, path) - } - function preventDefault(e) { - if (e && !e.target.classList.contains('metadata')) { - e.preventDefault() - } - } - function clamp(n, a, b) { return n < a ? a : n < b ? n : b } - - // initialize the app when the DOM is ready - document.addEventListener('DOMContentLoaded', init) -} - -loadApp() diff --git a/old/faiss/static/js/dataUriToBlob.js b/old/faiss/static/js/dataUriToBlob.js deleted file mode 100644 index 80189b8d..00000000 --- a/old/faiss/static/js/dataUriToBlob.js +++ /dev/null @@ -1,58 +0,0 @@ -var dataUriToUint8Array = function(uri){ - var data = uri.split(',')[1]; - var bytes = atob(data); - var buf = new ArrayBuffer(bytes.length); - var u8 = new Uint8Array(buf); - for (var i = 0; i < bytes.length; i++) { - u8[i] = bytes.charCodeAt(i); - } - return u8 -} - -window.dataUriToBlob = (function(){ -/** - * Blob constructor. - */ - -var Blob = window.Blob; - -/** - * ArrayBufferView support. - */ - -var hasArrayBufferView = new Blob([new Uint8Array(100)]).size == 100; - -/** - * Return a `Blob` for the given data `uri`. - * - * @param {String} uri - * @return {Blob} - * @api public - */ - -var dataUriToBlob = function(uri){ - var data = uri.split(',')[1]; - var bytes = atob(data); - var buf = new ArrayBuffer(bytes.length); - var arr = new Uint8Array(buf); - for (var i = 0; i < bytes.length; i++) { - arr[i] = bytes.charCodeAt(i); - } - - if (!hasArrayBufferView) arr = buf; - var blob = new Blob([arr], { type: mime(uri) }); - blob.slice = blob.slice || blob.webkitSlice; - return blob; -}; - -/** - * Return data uri mime type. 
- */ - -function mime(uri) { - return uri.split(';')[0].slice(5); -} - -return dataUriToBlob; - -})() diff --git a/old/faiss/static/js/metadata-app.js b/old/faiss/static/js/metadata-app.js deleted file mode 100644 index fa2265fa..00000000 --- a/old/faiss/static/js/metadata-app.js +++ /dev/null @@ -1,50 +0,0 @@ -!function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{configurable:!1,enumerable:!0,get:r})},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=212)}([function(e,t,n){var r=n(99),o=36e5,i=6e4,a=2,u=/[T ]/,s=/:/,c=/^(\d{2})$/,l=[/^([+-]\d{2})$/,/^([+-]\d{3})$/,/^([+-]\d{4})$/],f=/^(\d{4})/,d=[/^([+-]\d{4})/,/^([+-]\d{5})/,/^([+-]\d{6})/],p=/^-(\d{2})$/,h=/^-?(\d{3})$/,m=/^-?(\d{2})-?(\d{2})$/,v=/^-?W(\d{2})$/,y=/^-?W(\d{2})-?(\d{1})$/,g=/^(\d{2}([.,]\d*)?)$/,_=/^(\d{2}):?(\d{2}([.,]\d*)?)$/,b=/^(\d{2}):?(\d{2}):?(\d{2}([.,]\d*)?)$/,w=/([Z+-].*)$/,x=/^(Z)$/,E=/^([+-])(\d{2})$/,O=/^([+-])(\d{2}):?(\d{2})$/;function S(e,t,n){t=t||0,n=n||0;var r=new Date(0);r.setUTCFullYear(e,0,4);var o=7*t+n+1-(r.getUTCDay()||7);return r.setUTCDate(r.getUTCDate()+o),r}e.exports=function(e,t){if(r(e))return new Date(e.getTime());if("string"!=typeof e)return new Date(e);var n=(t||{}).additionalDigits;n=null==n?a:Number(n);var T=function(e){var t,n={},r=e.split(u);if(s.test(r[0])?(n.date=null,t=r[0]):(n.date=r[0],t=r[1]),t){var o=w.exec(t);o?(n.time=t.replace(o[1],""),n.timezone=o[1]):n.time=t}return n}(e),k=function(e,t){var n,r=l[t],o=d[t];if(n=f.exec(e)||o.exec(e)){var i=n[1];return{year:parseInt(i,10),restDateString:e.slice(i.length)}}if(n=c.exec(e)||r.exec(e)){var a=n[1];return{year:100*parseInt(a,10),restDateString:e.slice(a.length)}}return{year:null}}(T.date,n),R=k.year,j=function(e,t){if(null===t)return null;var n,r,o,i;if(0===e.length)return(r=new Date(0)).setUTCFullYear(t),r;if(n=p.exec(e))return r=new Date(0),o=parseInt(n[1],10)-1,r.setUTCFullYear(t,o),r;if(n=h.exec(e)){r=new Date(0);var a=parseInt(n[1],10);return r.setUTCFullYear(t,0,a),r}if(n=m.exec(e)){r=new Date(0),o=parseInt(n[1],10)-1;var u=parseInt(n[2],10);return r.setUTCFullYear(t,o,u),r}if(n=v.exec(e))return i=parseInt(n[1],10)-1,S(t,i);if(n=y.exec(e)){i=parseInt(n[1],10)-1;var s=parseInt(n[2],10)-1;return S(t,i,s)}return null}(k.restDateString,R);if(j){var P,C=j.getTime(),M=0;return T.time&&(M=function(e){var t,n,r;if(t=g.exec(e))return(n=parseFloat(t[1].replace(",",".")))%24*o;if(t=_.exec(e))return n=parseInt(t[1],10),r=parseFloat(t[2].replace(",",".")),n%24*o+r*i;if(t=b.exec(e)){n=parseInt(t[1],10),r=parseInt(t[2],10);var a=parseFloat(t[3].replace(",","."));return n%24*o+r*i+1e3*a}return null}(T.time)),T.timezone?P=function(e){var t,n;return(t=x.exec(e))?0:(t=E.exec(e))?(n=60*parseInt(t[2],10),"+"===t[1]?-n:n):(t=O.exec(e))?(n=60*parseInt(t[2],10)+parseInt(t[3],10),"+"===t[1]?-n:n):0}(T.timezone):(P=new Date(C+M).getTimezoneOffset(),P=new Date(C+M+P*i).getTimezoneOffset()),new Date(C+M+P*i)}return new Date(e)}},function(e,t,n){"use strict";e.exports=n(213)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(220),o=n(117),i=n(224);n.d(t,"Provider",function(){return r.b}),n.d(t,"createProvider",function(){return r.a}),n.d(t,"connectAdvanced",function(){return 
o.a}),n.d(t,"connect",function(){return i.a})},function(e,t){var n;n=function(){return this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(e){"object"==typeof window&&(n=window)}e.exports=n},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(340));t.default=r.default||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}},function(e,t,n){e.exports={default:n(243),__esModule:!0}},function(e,t,n){"use strict";t.__esModule=!0,t.default=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(126));t.default=function(){function e(e,t){for(var n=0;n<t.length;n++){var o=t[n];o.enumerable=o.enumerable||!1,o.configurable=!0,"value"in o&&(o.writable=!0),(0,r.default)(e,o.key,o)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}()},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(78));t.default=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!==(void 0===t?"undefined":(0,r.default)(t))&&"function"!=typeof t?e:t}},function(e,t,n){"use strict";t.__esModule=!0;var r=a(n(266)),o=a(n(270)),i=a(n(78));function a(e){return e&&e.__esModule?e:{default:e}}t.default=function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+(void 0===t?"undefined":(0,i.default)(t)));e.prototype=(0,o.default)(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(r.default?(0,r.default)(e,t):e.__proto__=t)}},function(e,t){var n=e.exports={version:"2.5.7"};"number"==typeof __e&&(__e=n)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Video=t.Keyframes=t.Keyframe=t.DetectionBoxes=t.DetectionList=t.Classifier=t.ActiveLink=t.TableCell=t.TableRow=t.TableTuples=t.TableArray=t.TableObject=t.Gate=t.Loader=t.Sidebar=t.Footer=t.Header=void 0;var r=v(n(287)),o=v(n(318)),i=v(n(330)),a=v(n(334)),u=v(n(335)),s=v(n(336)),c=v(n(337)),l=v(n(338)),f=v(n(339)),d=v(n(344)),p=v(n(357)),h=v(n(497)),m=n(498);function v(e){return e&&e.__esModule?e:{default:e}}n(500),t.Header=r.default,t.Footer=s.default,t.Sidebar=l.default,t.Loader=c.default,t.Gate=f.default,t.TableObject=m.TableObject,t.TableArray=m.TableArray,t.TableTuples=m.TableTuples,t.TableRow=m.TableRow,t.TableCell=m.TableCell,t.ActiveLink=o.default,t.Classifier=i.default,t.DetectionList=u.default,t.DetectionBoxes=a.default,t.Keyframe=d.default,t.Keyframes=p.default,t.Video=h.default},function(e,t,n){e.exports=n(221)()},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t,n){var r=n(75)("wks"),o=n(55),i=n(13).Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n.d(t,"createStore",function(){return s}),n.d(t,"combineReducers",function(){return l}),n.d(t,"bindActionCreators",function(){return d}),n.d(t,"applyMiddleware",function(){return h}),n.d(t,"compose",function(){return p}),n.d(t,"__DO_NOT_USE__ActionTypes",function(){return o});var 
r=n(227),o={INIT:"@@redux/INIT"+Math.random().toString(36).substring(7).split("").join("."),REPLACE:"@@redux/REPLACE"+Math.random().toString(36).substring(7).split("").join(".")},i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},a=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function u(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)return!1;for(var t=e;null!==Object.getPrototypeOf(t);)t=Object.getPrototypeOf(t);return Object.getPrototypeOf(e)===t}function s(e,t,n){var a;if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 0!==n){if("function"!=typeof n)throw new Error("Expected the enhancer to be a function.");return n(s)(e,t)}if("function"!=typeof e)throw new Error("Expected the reducer to be a function.");var c=e,l=t,f=[],d=f,p=!1;function h(){d===f&&(d=f.slice())}function m(){if(p)throw new Error("You may not call store.getState() while the reducer is executing. The reducer has already received the state as an argument. Pass it down from the top reducer instead of reading it from the store.");return l}function v(e){if("function"!=typeof e)throw new Error("Expected the listener to be a function.");if(p)throw new Error("You may not call store.subscribe() while the reducer is executing. If you would like to be notified after the store has been updated, subscribe from a component and invoke store.getState() in the callback to access the latest state. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");var t=!0;return h(),d.push(e),function(){if(t){if(p)throw new Error("You may not unsubscribe from a store listener while the reducer is executing. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");t=!1,h();var n=d.indexOf(e);d.splice(n,1)}}}function y(e){if(!u(e))throw new Error("Actions must be plain objects. Use custom middleware for async actions.");if(void 0===e.type)throw new Error('Actions may not have an undefined "type" property. Have you misspelled a constant?');if(p)throw new Error("Reducers may not dispatch actions.");try{p=!0,l=c(l,e)}finally{p=!1}for(var t=f=d,n=0;n<t.length;n++){(0,t[n])()}return e}return y({type:o.INIT}),(a={dispatch:y,subscribe:v,getState:m,replaceReducer:function(e){if("function"!=typeof e)throw new Error("Expected the nextReducer to be a function.");c=e,y({type:o.REPLACE})}})[r.a]=function(){var e,t=v;return(e={subscribe:function(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new TypeError("Expected the observer to be an object.");function n(){e.next&&e.next(m())}return n(),{unsubscribe:t(n)}}})[r.a]=function(){return this},e},a}function c(e,t){var n=t&&t.type;return"Given "+(n&&'action "'+String(n)+'"'||"an action")+', reducer "'+e+'" returned undefined. To ignore an action, you must explicitly return the previous state. If you want this reducer to hold no value, you can return null instead of undefined.'}function l(e){for(var t=Object.keys(e),n={},r=0;r<t.length;r++){var i=t[r];0,"function"==typeof e[i]&&(n[i]=e[i])}var a=Object.keys(n);var u=void 0;try{!function(e){Object.keys(e).forEach(function(t){var n=e[t];if(void 0===n(void 0,{type:o.INIT}))throw new Error('Reducer "'+t+"\" returned undefined during initialization. 
If the state passed to the reducer is undefined, you must explicitly return the initial state. The initial state may not be undefined. If you don't want to set a value for this reducer, you can use null instead of undefined.");if(void 0===n(void 0,{type:"@@redux/PROBE_UNKNOWN_ACTION_"+Math.random().toString(36).substring(7).split("").join(".")}))throw new Error('Reducer "'+t+"\" returned undefined when probed with a random type. Don't try to handle "+o.INIT+' or other actions in "redux/*" namespace. They are considered private. Instead, you must return the current state for any unknown actions, unless it is undefined, in which case you must return the initial state, regardless of the action type. The initial state may not be undefined, but can be null.')})}(n)}catch(e){u=e}return function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1];if(u)throw u;for(var r=!1,o={},i=0;i<a.length;i++){var s=a[i],l=n[s],f=e[s],d=l(f,t);if(void 0===d){var p=c(s,t);throw new Error(p)}o[s]=d,r=r||d!==f}return r?o:e}}function f(e,t){return function(){return t(e.apply(this,arguments))}}function d(e,t){if("function"==typeof e)return f(e,t);if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new Error("bindActionCreators expected an object or a function, instead received "+(null===e?"null":void 0===e?"undefined":i(e))+'. Did you write "import ActionCreators from" instead of "import * as ActionCreators from"?');for(var n=Object.keys(e),r={},o=0;o<n.length;o++){var a=n[o],u=e[a];"function"==typeof u&&(r[a]=f(u,t))}return r}function p(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return 0===t.length?function(e){return e}:1===t.length?t[0]:t.reduce(function(e,t){return function(){return e(t.apply(void 0,arguments))}})}function h(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return function(e){return function(){for(var n=arguments.length,r=Array(n),o=0;o<n;o++)r[o]=arguments[o];var i=e.apply(void 0,r),u=function(){throw new Error("Dispatching while constructing your middleware is not allowed. 
Other middleware would not be applied to this dispatch.")},s={getState:i.getState,dispatch:function(){return u.apply(void 0,arguments)}},c=t.map(function(e){return e(s)});return u=p.apply(void 0,c)(i.dispatch),a({},i,{dispatch:u})}}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(319);n.d(t,"BrowserRouter",function(){return r.a});var o=n(320);n.d(t,"HashRouter",function(){return o.a});var i=n(152);n.d(t,"Link",function(){return i.a});var a=n(321);n.d(t,"MemoryRouter",function(){return a.a});var u=n(322);n.d(t,"NavLink",function(){return u.a});var s=n(323);n.d(t,"Prompt",function(){return s.a});var c=n(324);n.d(t,"Redirect",function(){return c.a});var l=n(153);n.d(t,"Route",function(){return l.a});var f=n(92);n.d(t,"Router",function(){return f.a});var d=n(325);n.d(t,"StaticRouter",function(){return d.a});var p=n(326);n.d(t,"Switch",function(){return p.a});var h=n(327);n.d(t,"generatePath",function(){return h.a});var m=n(328);n.d(t,"matchPath",function(){return m.a});var v=n(329);n.d(t,"withRouter",function(){return v.a})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.login=t.post=t.preloadImage=t.keyframeUri=t.metadataUri=t.imageUrl=t.hashPath=t.clamp=t.px=t.percent=t.timestamp=t.padSeconds=t.courtesyS=t.verify=t.isVerified=t.pad=t.formatName=t.widths=t.isDesktop=t.isMobile=t.isAndroid=t.isiPad=t.isiPhone=void 0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(93));var o=t.isiPhone=!(!navigator.userAgent.match(/iPhone/i)&&!navigator.userAgent.match(/iPod/i)),i=t.isiPad=!!navigator.userAgent.match(/iPad/i),a=t.isAndroid=!!navigator.userAgent.match(/Android/i),u=t.isMobile=o||i||a,s=t.isDesktop=!u;document.body.parentNode.classList.add(s?"desktop":"mobile");t.widths={th:160,sm:320,md:640,lg:1280};var c="id url cc sa fp md5 sha256".split(" ").map(function(e){return"_"+e}),l=c.map(function(e){return e.toUpperCase()}),f=(t.formatName=function(e){return c.forEach(function(t,n){return e=e.replace(t,l[n])}),e.replace(/_/g," ")},t.pad=function(e,t){for(var n=String(e||0);n.length<t;)n="0"+n;return n}),d=t.isVerified=function(e){return 1===e||"1"===e||"verified"===e},p=(t.verify=function(e){return d(e)?"verified":"unverified"},t.courtesyS=function(e,t){return e+" "+(1===e?t:t+"s")},t.padSeconds=function(e){return e<10?"0"+e:e}),h=(t.timestamp=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0;e/=arguments.length>1&&void 0!==arguments[1]?arguments[1]:25;var t=p(Math.round(e)%60);return(e=Math.floor(e/60))>60?Math.floor(e/60)+":"+p(e%60)+":"+t:e%60+":"+t},t.percent=function(e){return(100*e).toFixed(1)+"%"},t.px=function(e,t){return Math.round(e*t)+"px"},t.clamp=function(e,t,n){return e<t?t:e<n?e:n},t.hashPath=function(e){if(!e||e.length<9)throw new Error("Invalid sha256");return[e.slice(0,3),e.slice(3,6),e.slice(6,9),e].join("/")}),m=t.imageUrl=function(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"th";return["https://sa-vframe.ams3.digitaloceanspaces.com/v1/media/keyframes",d(e)?null:"unverified",h(t),f(n,6),r,"index.jpg"].filter(function(e){return!!e}).join("/")},v=(t.metadataUri=function(e,t){return"/metadata/"+e+"/"+t+"/"},t.keyframeUri=function(e,t){return"/metadata/"+e+"/keyframe/"+f(t,6)+"/"},t.preloadImage=function(e){var t=e.verified,n=e.hash,r=e.frame,o=e.url;n&&r&&(o=m(t,n,r,"md"));var i=new Image,a=!1;i.onload=function(){a||(a=!0,i.onload=null)},i.crossOrigin="anonymous",i.src=o,i.complete&&i.onload()},null),y="",g="",_=(t.post=function(e,t,n){_();var o=void 0;t 
instanceof FormData?o={Accept:"application/json, application/xml, text/play, text/html, *.*"}:(o={Accept:"application/json, application/xml, text/play, text/html, *.*","Content-Type":"application/json; charset=utf-8"},t=(0,r.default)(t));var i={method:"POST",body:t,headers:o,credentials:"include"};return n&&(o.Authorization="Token "+y),fetch(e,i).then(function(e){return e.json()})},t.login=function(){if(v)return v;var e="0.0.0.0"===window.location.hostname||"127.0.0.1"===window.location.hostname;try{var t=JSON.parse(JSON.parse(localStorage.getItem("persist:root")).auth);return y=t.token,g=t.user.username,y&&console.log("logged in",g),v=t,y||e||(window.location.href="/"),t}catch(t){return e||(window.location.href="/"),{}}})},function(e,t,n){var r=n(13),o=n(10),i=n(34),a=n(26),u=n(25),s=function(e,t,n){var c,l,f,d=e&s.F,p=e&s.G,h=e&s.S,m=e&s.P,v=e&s.B,y=e&s.W,g=p?o:o[t]||(o[t]={}),_=g.prototype,b=p?r:h?r[t]:(r[t]||{}).prototype;for(c in p&&(n=t),n)(l=!d&&b&&void 0!==b[c])&&u(g,c)||(f=l?b[c]:n[c],g[c]=p&&"function"!=typeof b[c]?n[c]:v&&l?i(f,r):y&&b[c]==f?function(e){var t=function(t,n,r){if(this instanceof e){switch(arguments.length){case 0:return new e;case 1:return new e(t);case 2:return new e(t,n)}return new e(t,n,r)}return e.apply(this,arguments)};return t.prototype=e.prototype,t}(f):m&&"function"==typeof f?i(Function.call,f):f,m&&((g.virtual||(g.virtual={}))[c]=f,e&s.R&&_&&!_[c]&&a(_,c,f)))};s.F=1,s.G=2,s.S=4,s.P=8,s.B=16,s.W=32,s.U=64,s.R=128,e.exports=s},function(e,t,n){"use strict";e.exports=function(e,t,n,r,o,i,a,u){if(!e){var s;if(void 0===t)s=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,o,i,a,u],l=0;(s=new Error(t.replace(/%s/g,function(){return c[l++]}))).name="Invariant Violation"}throw s.framesToPop=1,s}}},function(e,t,n){var r=n(23);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.random=t.browse=t.search=t.searchByFrame=t.searchByVerifiedFrame=t.upload=t.updateOptions=t.panic=t.publicUrl=void 0;var r=s(n(4)),o=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39)),i=n(94),a=n(17),u=s(n(354));function s(e){return e&&e.__esModule?e:{default:e}}var c={upload:function(){return"https://syrianarchive.vframe.io/search/api/upload"},search:function(){return"https://syrianarchive.vframe.io/search/api/fetch"},searchByVerifiedFrame:function(e,t,n){return"https://syrianarchive.vframe.io/search/api/search/"+e+"/"+t+"/"+(0,a.pad)(n,6)},searchByFrame:function(e,t){return"https://syrianarchive.vframe.io/search/api/search/"+e+"/"+(0,a.pad)(t,6)},browse:function(e){return"https://syrianarchive.vframe.io/search/api/list/"+e},random:function(){return"https://syrianarchive.vframe.io/search/api/random"},check:function(){return"https://syrianarchive.vframe.io/api/images/import/search"}},l=t.publicUrl={browse:function(e){return"/search/browse/"+e},searchByVerifiedFrame:function(e,t,n){return"/search/keyframe/"+(0,a.verify)(e)+"/"+t+"/"+(0,a.pad)(n,6)},searchByFrame:function(e,t){return"/search/keyframe/"+e+"/"+(0,a.pad)(t,6)},review:function(){return"/search/review/"}},f=function(e,t){return{type:o.search.loading,tag:e,offset:t}},d=function(e,t){var n=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:0;return{type:o.search.loaded,tag:e,data:t,offset:n}},p=function(e,t){return{type:o.search.error,tag:e,err:t}};t.panic=function(){return function(e){i.history.push("/search/"),e({type:o.search.panic})}},t.updateOptions=function(e){return function(t){t({type:o.search.update_options,opt:e})}},t.upload=function(e,t){return function(n){var o=i.store.getState().search.options,u=new FormData;u.append("query_img",e),u.append("limit",o.perPage),t||n(f("query")),(0,a.post)(c.upload(),u).then(function(e){if(t){var o=e.query.timing;e.query=(0,r.default)({},t,{timing:o});var a={};if(e.query.crop){var u=e.query.crop,s=u.x,c=u.y,l=u.w,f=u.h;a.crop=[s,c,l,f].map(function(e){return parseInt(e,10)}).join(",")}t.url&&!t.hash&&(a.url=t.url)}else e.query.url&&!window.location.search.match(e.query.url)&&i.history.push("/search/?url="+e.query.url);n(d("query",e))}).catch(function(e){return n(p("query",e))})}},t.searchByVerifiedFrame=function(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0;return function(o){var s=i.store.getState().search.options;o(f("query",r));var l=u.default.stringify({limit:s.perPage,offset:r});(0,a.preloadImage)({verified:e,hash:t,frame:n}),fetch(c.searchByVerifiedFrame(e,t,n)+"?"+l,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return o(d("query",e,r))}).catch(function(e){return o(p("query",e))})}},t.searchByFrame=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0;return function(r){var o=i.store.getState().search.options;r(f("query",n));var s=u.default.stringify({limit:o.perPage,offset:n});(0,a.preloadImage)({verified:!1,hash:e,frame:t}),fetch(c.searchByFrame(e,t)+"?"+s,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return r(d("query",e,n))}).catch(function(e){return r(p("query",e))})}},t.search=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0;return function(n){var r=i.store.getState().search.options;n(f("query",t));var o=u.default.stringify({url:e,limit:r.perPage,offset:t});0===e.indexOf("static")&&(0,a.preloadImage)({uri:e}),fetch(c.search(e)+"?"+o,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return n(d("query",e,t))}).catch(function(e){return n(p("query",e))})}},t.browse=function(e){return function(t){var n="browse";t(f(n)),fetch(c[n](e),{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return t(d(n,e))}).catch(function(e){return t(p(n,e))})}},t.random=function(){return function(e){var t=i.store.getState().search.options,n=u.default.stringify({limit:t.perPage});e(f("query")),fetch(c.random()+"?"+n,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(t){e(d("query",t)),i.history.push(l.searchByVerifiedFrame(t.query.verified,t.query.hash,t.query.frame))}).catch(function(t){return e(p("query",t))})}}},function(e,t,n){var r=n(20),o=n(125),i=n(77),a=Object.defineProperty;t.f=n(24)?Object.defineProperty:function(e,t,n){if(r(e),t=i(t,!0),r(n),o)try{return a(e,t,n)}catch(e){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in n&&(e[t]=n.value),e}},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t,n){e.exports=!n(35)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(e,t){var n={}.hasOwnProperty;e.exports=function(e,t){return n.call(e,t)}},function(e,t,n){var 
r=n(22),o=n(43);e.exports=n(24)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(275);n.d(t,"createBrowserHistory",function(){return r.a});var o=n(278);n.d(t,"createHashHistory",function(){return o.a});var i=n(279);n.d(t,"createMemoryHistory",function(){return i.a});var a=n(62);n.d(t,"createLocation",function(){return a.a}),n.d(t,"locationsAreEqual",function(){return a.b});var u=n(47);n.d(t,"parsePath",function(){return u.d}),n.d(t,"createPath",function(){return u.b})},function(e,t,n){e.exports={default:n(331),__esModule:!0}},function(e,t,n){var r=n(0),o=n(30);e.exports=function(e){var t=r(e),n=t.getFullYear(),i=new Date(0);i.setFullYear(n+1,0,4),i.setHours(0,0,0,0);var a=o(i),u=new Date(0);u.setFullYear(n,0,4),u.setHours(0,0,0,0);var s=o(u);return t.getTime()>=a.getTime()?n+1:t.getTime()>=s.getTime()?n:n-1}},function(e,t,n){var r=n(66);e.exports=function(e){return r(e,{weekStartsOn:1})}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setHours(0,0,0,0),t}},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}},function(e,t,n){"use strict";var r=n(70),o=Object.keys||function(e){var t=[];for(var n in e)t.push(n);return t};e.exports=f;var i=n(54);i.inherits=n(32);var a=n(201),u=n(111);i.inherits(f,a);for(var s=o(u.prototype),c=0;c<s.length;c++){var l=s[c];f.prototype[l]||(f.prototype[l]=u.prototype[l])}function f(e){if(!(this instanceof f))return new f(e);a.call(this,e),u.call(this,e),e&&!1===e.readable&&(this.readable=!1),e&&!1===e.writable&&(this.writable=!1),this.allowHalfOpen=!0,e&&!1===e.allowHalfOpen&&(this.allowHalfOpen=!1),this.once("end",d)}function d(){this.allowHalfOpen||this._writableState.ended||r.nextTick(p,this)}function p(e){e.end()}Object.defineProperty(f.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),Object.defineProperty(f.prototype,"destroyed",{get:function(){return void 0!==this._readableState&&void 0!==this._writableState&&(this._readableState.destroyed&&this._writableState.destroyed)},set:function(e){void 0!==this._readableState&&void 0!==this._writableState&&(this._readableState.destroyed=e,this._writableState.destroyed=e)}}),f.prototype._destroy=function(e,t){this.push(null),this.end(),r.nextTick(t,e)}},function(e,t,n){var r=n(56);e.exports=function(e,t,n){if(r(e),void 0===t)return e;switch(n){case 1:return function(n){return e.call(t,n)};case 2:return function(n,r){return e.call(t,n,r)};case 3:return function(n,r,o){return e.call(t,n,r,o)}}return function(){return e.apply(t,arguments)}}},function(e,t){e.exports=function(e){try{return!!e()}catch(e){return!0}}},function(e,t){e.exports={}},function(e,t,n){var r=n(130),o=n(73);e.exports=function(e){return r(o(e))}},function(e,t,n){"use strict";var r=function(){};e.exports=r},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=t.asType=function(e,t){return[e,t].join("_").toUpperCase()},o=t.tagAsType=function(e,t){return t.reduce(function(t,n){return 
t[n]=r(e,n),t},{})};t.metadata=o("metadata",["loading","loaded","loaded_many","error","set_hash"]),t.search=o("search",["loading","loaded","error","panic","update_options"]),t.review=o("review",["loading","loaded","error","save","unsave","refresh","clear","dedupe","create","set_count"]),t.init="@@INIT"},function(e,t){var n,r,o=e.exports={};function i(){throw new Error("setTimeout has not been defined")}function a(){throw new Error("clearTimeout has not been defined")}function u(e){if(n===setTimeout)return setTimeout(e,0);if((n===i||!n)&&setTimeout)return n=setTimeout,setTimeout(e,0);try{return n(e,0)}catch(t){try{return n.call(null,e,0)}catch(t){return n.call(this,e,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:i}catch(e){n=i}try{r="function"==typeof clearTimeout?clearTimeout:a}catch(e){r=a}}();var s,c=[],l=!1,f=-1;function d(){l&&s&&(l=!1,s.length?c=s.concat(c):f=-1,c.length&&p())}function p(){if(!l){var e=u(d);l=!0;for(var t=c.length;t;){for(s=c,c=[];++f<t;)s&&s[f].run();f=-1,t=c.length}s=null,l=!1,function(e){if(r===clearTimeout)return clearTimeout(e);if((r===a||!r)&&clearTimeout)return r=clearTimeout,clearTimeout(e);try{r(e)}catch(t){try{return r.call(null,e)}catch(t){return r.call(this,e)}}}(e)}}function h(e,t){this.fun=e,this.array=t}function m(){}o.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)t[n-1]=arguments[n];c.push(new h(e,t)),1!==c.length||l||u(p)},h.prototype.run=function(){this.fun.apply(null,this.array)},o.title="browser",o.browser=!0,o.env={},o.argv=[],o.version="",o.versions={},o.on=m,o.addListener=m,o.once=m,o.off=m,o.removeListener=m,o.removeAllListeners=m,o.emit=m,o.prependListener=m,o.prependOnceListener=m,o.listeners=function(e){return[]},o.binding=function(e){throw new Error("process.binding is not supported")},o.cwd=function(){return"/"},o.chdir=function(e){throw new Error("process.chdir is not supported")},o.umask=function(){return 0}},function(e,t,n){var r=n(73);e.exports=function(e){return Object(r(e))}},function(e,t){e.exports=!0},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t,n){"use strict";var r=n(249)(!0);n(127)(String,"String",function(e){this._t=String(e),this._i=0},function(){var e,t=this._t,n=this._i;return n>=t.length?{value:void 0,done:!0}:(e=r(t,n),this._i+=e.length,{value:e,done:!1})})},function(e,t,n){var r=n(129),o=n(82);e.exports=Object.keys||function(e){return r(e,o)}},function(e,t){var n={}.toString;e.exports=function(e){return n.call(e).slice(8,-1)}},function(e,t,n){"use strict";n.d(t,"a",function(){return r}),n.d(t,"f",function(){return o}),n.d(t,"c",function(){return i}),n.d(t,"e",function(){return a}),n.d(t,"g",function(){return u}),n.d(t,"d",function(){return s}),n.d(t,"b",function(){return c});var r=function(e){return"/"===e.charAt(0)?e:"/"+e},o=function(e){return"/"===e.charAt(0)?e.substr(1):e},i=function(e,t){return new RegExp("^"+t+"(\\/|\\?|#|$)","i").test(e)},a=function(e,t){return i(e,t)?e.substr(t.length):e},u=function(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e},s=function(e){var t=e||"/",n="",r="",o=t.indexOf("#");-1!==o&&(r=t.substr(o),t=t.substr(0,o));var i=t.indexOf("?");return-1!==i&&(n=t.substr(i),t=t.substr(0,i)),{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}},c=function(e){var t=e.pathname,n=e.search,r=e.hash,o=t||"/";return n&&"?"!==n&&(o+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(o+="#"===r.charAt(0)?r:"#"+r),o}},function(e,t){e.exports=function(e){var 
t=[];return t.toString=function(){return this.map(function(t){var n=function(e,t){var n=e[1]||"",r=e[3];if(!r)return n;if(t&&"function"==typeof btoa){var o=function(e){return"/*# sourceMappingURL=data:application/json;charset=utf-8;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(e))))+" */"}(r),i=r.sources.map(function(e){return"/*# sourceURL="+r.sourceRoot+e+" */"});return[n].concat(i).concat([o]).join("\n")}return[n].join("\n")}(t,e);return t[2]?"@media "+t[2]+"{"+n+"}":n}).join("")},t.i=function(e,n){"string"==typeof e&&(e=[[null,e,""]]);for(var r={},o=0;o<this.length;o++){var i=this[o][0];"number"==typeof i&&(r[i]=!0)}for(o=0;o<e.length;o++){var a=e[o];"number"==typeof a[0]&&r[a[0]]||(n&&!a[2]?a[2]=n:n&&(a[2]="("+a[2]+") and ("+n+")"),t.push(a))}},t}},function(e,t,n){var r={},o=function(e){var t;return function(){return void 0===t&&(t=e.apply(this,arguments)),t}}(function(){return window&&document&&document.all&&!window.atob}),i=function(e){var t={};return function(e){if("function"==typeof e)return e();if(void 0===t[e]){var n=function(e){return document.querySelector(e)}.call(this,e);if(window.HTMLIFrameElement&&n instanceof window.HTMLIFrameElement)try{n=n.contentDocument.head}catch(e){n=null}t[e]=n}return t[e]}}(),a=null,u=0,s=[],c=n(317);function l(e,t){for(var n=0;n<e.length;n++){var o=e[n],i=r[o.id];if(i){i.refs++;for(var a=0;a<i.parts.length;a++)i.parts[a](o.parts[a]);for(;a<o.parts.length;a++)i.parts.push(v(o.parts[a],t))}else{var u=[];for(a=0;a<o.parts.length;a++)u.push(v(o.parts[a],t));r[o.id]={id:o.id,refs:1,parts:u}}}}function f(e,t){for(var n=[],r={},o=0;o<e.length;o++){var i=e[o],a=t.base?i[0]+t.base:i[0],u={css:i[1],media:i[2],sourceMap:i[3]};r[a]?r[a].parts.push(u):n.push(r[a]={id:a,parts:[u]})}return n}function d(e,t){var n=i(e.insertInto);if(!n)throw new Error("Couldn't find a style target. 
This probably means that the value for the 'insertInto' parameter is invalid.");var r=s[s.length-1];if("top"===e.insertAt)r?r.nextSibling?n.insertBefore(t,r.nextSibling):n.appendChild(t):n.insertBefore(t,n.firstChild),s.push(t);else if("bottom"===e.insertAt)n.appendChild(t);else{if("object"!=typeof e.insertAt||!e.insertAt.before)throw new Error("[Style Loader]\n\n Invalid value for parameter 'insertAt' ('options.insertAt') found.\n Must be 'top', 'bottom', or Object.\n (https://github.com/webpack-contrib/style-loader#insertat)\n");var o=i(e.insertInto+" "+e.insertAt.before);n.insertBefore(t,o)}}function p(e){if(null===e.parentNode)return!1;e.parentNode.removeChild(e);var t=s.indexOf(e);t>=0&&s.splice(t,1)}function h(e){var t=document.createElement("style");return void 0===e.attrs.type&&(e.attrs.type="text/css"),m(t,e.attrs),d(e,t),t}function m(e,t){Object.keys(t).forEach(function(n){e.setAttribute(n,t[n])})}function v(e,t){var n,r,o,i;if(t.transform&&e.css){if(!(i=t.transform(e.css)))return function(){};e.css=i}if(t.singleton){var s=u++;n=a||(a=h(t)),r=g.bind(null,n,s,!1),o=g.bind(null,n,s,!0)}else e.sourceMap&&"function"==typeof URL&&"function"==typeof URL.createObjectURL&&"function"==typeof URL.revokeObjectURL&&"function"==typeof Blob&&"function"==typeof btoa?(n=function(e){var t=document.createElement("link");return void 0===e.attrs.type&&(e.attrs.type="text/css"),e.attrs.rel="stylesheet",m(t,e.attrs),d(e,t),t}(t),r=function(e,t,n){var r=n.css,o=n.sourceMap,i=void 0===t.convertToAbsoluteUrls&&o;(t.convertToAbsoluteUrls||i)&&(r=c(r));o&&(r+="\n/*# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(o))))+" */");var a=new Blob([r],{type:"text/css"}),u=e.href;e.href=URL.createObjectURL(a),u&&URL.revokeObjectURL(u)}.bind(null,n,t),o=function(){p(n),n.href&&URL.revokeObjectURL(n.href)}):(n=h(t),r=function(e,t){var n=t.css,r=t.media;r&&e.setAttribute("media",r);if(e.styleSheet)e.styleSheet.cssText=n;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(n))}}.bind(null,n),o=function(){p(n)});return r(e),function(t){if(t){if(t.css===e.css&&t.media===e.media&&t.sourceMap===e.sourceMap)return;r(e=t)}else o()}}e.exports=function(e,t){if("undefined"!=typeof DEBUG&&DEBUG&&"object"!=typeof document)throw new Error("The style-loader cannot be used in a non-browser environment");(t=t||{}).attrs="object"==typeof t.attrs?t.attrs:{},t.singleton||"boolean"==typeof t.singleton||(t.singleton=o()),t.insertInto||(t.insertInto="head"),t.insertAt||(t.insertAt="bottom");var n=f(e,t);return l(n,t),function(e){for(var o=[],i=0;i<n.length;i++){var a=n[i];(u=r[a.id]).refs--,o.push(u)}e&&l(f(e,t),t);for(i=0;i<o.length;i++){var u;if(0===(u=o[i]).refs){for(var s=0;s<u.parts.length;s++)u.parts[s]();delete r[u.id]}}}};var y=function(){var e=[];return function(t,n){return e[t]=n,e.filter(Boolean).join("\n")}}();function g(e,t,n,r){var o=n?"":r.css;if(e.styleSheet)e.styleSheet.cssText=y(t,o);else{var i=document.createTextNode(o),a=e.childNodes;a[t]&&e.removeChild(a[t]),a.length?e.insertBefore(i,a[t]):e.appendChild(i)}}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setDate(n.getDate()+o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e).getTime(),o=Number(t);return new Date(n+o)}},function(e,t,n){var r=n(29),o=n(30);e.exports=function(e){var t=r(e),n=new Date(0);return n.setFullYear(t,0,4),n.setHours(0,0,0,0),o(n)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var 
n=r(e).getTime(),o=r(t).getTime();return n<o?-1:n>o?1:0}},function(e,t,n){(function(e){function n(e){return Object.prototype.toString.call(e)}t.isArray=function(e){return Array.isArray?Array.isArray(e):"[object Array]"===n(e)},t.isBoolean=function(e){return"boolean"==typeof e},t.isNull=function(e){return null===e},t.isNullOrUndefined=function(e){return null==e},t.isNumber=function(e){return"number"==typeof e},t.isString=function(e){return"string"==typeof e},t.isSymbol=function(e){return"symbol"==typeof e},t.isUndefined=function(e){return void 0===e},t.isRegExp=function(e){return"[object RegExp]"===n(e)},t.isObject=function(e){return"object"==typeof e&&null!==e},t.isDate=function(e){return"[object Date]"===n(e)},t.isError=function(e){return"[object Error]"===n(e)||e instanceof Error},t.isFunction=function(e){return"function"==typeof e},t.isPrimitive=function(e){return null===e||"boolean"==typeof e||"number"==typeof e||"string"==typeof e||"symbol"==typeof e||void 0===e},t.isBuffer=e.isBuffer}).call(t,n(204).Buffer)},function(e,t){var n=0,r=Math.random();e.exports=function(e){return"Symbol(".concat(void 0===e?"":e,")_",(++n+r).toString(36))}},function(e,t){e.exports=function(e){if("function"!=typeof e)throw TypeError(e+" is not a function!");return e}},function(e,t,n){var r=n(22).f,o=n(25),i=n(14)("toStringTag");e.exports=function(e,t,n){e&&!o(e=n?e:e.prototype,i)&&r(e,i,{configurable:!0,value:t})}},function(e,t,n){n(254);for(var r=n(13),o=n(26),i=n(36),a=n(14)("toStringTag"),u="CSSRuleList,CSSStyleDeclaration,CSSValueList,ClientRectList,DOMRectList,DOMStringList,DOMTokenList,DataTransferItemList,FileList,HTMLAllCollection,HTMLCollection,HTMLFormElement,HTMLSelectElement,MediaList,MimeTypeArray,NamedNodeMap,NodeList,PaintRequestList,Plugin,PluginArray,SVGLengthList,SVGNumberList,SVGPathSegList,SVGPointList,SVGStringList,SVGTransformList,SourceBufferList,StyleSheetList,TextTrackCueList,TextTrackList,TouchList".split(","),s=0;s<u.length;s++){var c=u[s],l=r[c],f=l&&l.prototype;f&&!f[a]&&o(f,a,c),i[c]=i.Array}},function(e,t){t.f={}.propertyIsEnumerable},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r=t.LOCATION_CHANGE="@@router/LOCATION_CHANGE",o=(t.onLocationChanged=function(e,t){return{type:g("LOCATION_CHANGE"),payload:{location:e,action:t}}},t.CALL_HISTORY_METHOD="@@router/CALL_HISTORY_METHOD"),i=function(e){return function(){for(var t=arguments.length,n=Array(t),r=0;r<t;r++)n[r]=arguments[r];return{type:g("CALL_HISTORY_METHOD"),payload:{method:e,args:n}}}},a=t.push=g("updateLocation")("push"),u=t.replace=g("updateLocation")("replace"),s=t.go=g("updateLocation")("go"),c=t.goBack=g("updateLocation")("goBack"),l=t.goForward=g("updateLocation")("goForward");t.routerActions={push:g("push"),replace:g("replace"),go:g("go"),goBack:g("goBack"),goForward:g("goForward")};function f(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}var d=null;function p(){if(null===d){var e=f();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),d=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return d}function h(){var e=f();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function m(){var e=p(),t=h(),n=t[e];return 
n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=f();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var v="__INTENTIONAL_UNDEFINED__",y={};function g(e){var t=m();if(void 0===t[e])return function(e){switch(e){case"LOCATION_CHANGE":return r;case"CALL_HISTORY_METHOD":return o;case"updateLocation":return i;case"push":return a;case"replace":return u;case"go":return s;case"goBack":return c;case"goForward":return l}return}(e);var n=t[e];return n===v?void 0:n}function _(e,t){var r=m();if("object"!==(void 0===e?"undefined":n(e)))return r[e]=void 0===t?v:t,function(){b(e)};Object.keys(e).forEach(function(t){r[t]=e[t]})}function b(e){var t=m();delete t[e],0==Object.keys(t).length&&delete h()[p]}function w(e){var t=m(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(y,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",g),e("__GetDependency__",g),e("__Rewire__",_),e("__set__",_),e("__reset__",b),e("__ResetDependency__",b),e("__with__",w)}(),t.__get__=g,t.__GetDependency__=g,t.__Rewire__=_,t.__set__=_,t.__ResetDependency__=b,t.__RewireAPI__=y,t.default=y}).call(t,n(3))},function(e,t,n){"use strict";var r=function(){};e.exports=r},function(e,t,n){"use strict";n.d(t,"a",function(){return u}),n.d(t,"b",function(){return s});var r=n(276),o=n(277),i=n(47),a=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},u=function(e,t,n,o){var u=void 0;"string"==typeof e?(u=Object(i.d)(e)).state=t:(void 0===(u=a({},e)).pathname&&(u.pathname=""),u.search?"?"!==u.search.charAt(0)&&(u.search="?"+u.search):u.search="",u.hash?"#"!==u.hash.charAt(0)&&(u.hash="#"+u.hash):u.hash="",void 0!==t&&void 0===u.state&&(u.state=t));try{u.pathname=decodeURI(u.pathname)}catch(e){throw e instanceof URIError?new URIError('Pathname "'+u.pathname+'" could not be decoded. 
This is likely caused by an invalid percent-encoding.'):e}return n&&(u.key=n),o?u.pathname?"/"!==u.pathname.charAt(0)&&(u.pathname=Object(r.a)(u.pathname,o.pathname)):u.pathname=o.pathname:u.pathname||(u.pathname="/"),u},s=function(e,t){return e.pathname===t.pathname&&e.search===t.search&&e.hash===t.hash&&e.key===t.key&&Object(o.a)(e.state,t.state)}},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=n.n(u),c=n(12),l=n.n(c),f=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function d(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var p=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=d(this,e.call.apply(e,[this].concat(i))),r.state={match:r.computeMatch(r.props.history.location.pathname)},d(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.getChildContext=function(){return{router:f({},this.context.router,{history:this.props.history,route:{location:this.props.history.location,match:this.state.match}})}},t.prototype.computeMatch=function(e){return{path:"/",url:"/",params:{},isExact:"/"===e}},t.prototype.componentWillMount=function(){var e=this,t=this.props,n=t.children,r=t.history;a()(null==n||1===s.a.Children.count(n),"A <Router> may have only one child element"),this.unlisten=r.listen(function(){e.setState({match:e.computeMatch(r.location.pathname)})})},t.prototype.componentWillReceiveProps=function(e){o()(this.props.history===e.history,"You cannot change <Router history>")},t.prototype.componentWillUnmount=function(){this.unlisten()},t.prototype.render=function(){var e=this.props.children;return e?s.a.Children.only(e):null},t}(s.a.Component);p.propTypes={history:l.a.object.isRequired,children:l.a.node},p.contextTypes={router:l.a.object},p.childContextTypes={router:l.a.object.isRequired},t.a=p},function(e,t,n){"use strict";var r=n(140),o=n.n(r),i={},a=0;t.a=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments[2];"string"==typeof t&&(t={path:t});var r=t,u=r.path,s=r.exact,c=void 0!==s&&s,l=r.strict,f=void 0!==l&&l,d=r.sensitive,p=void 0!==d&&d;if(null==u)return n;var h=function(e,t){var n=""+t.end+t.strict+t.sensitive,r=i[n]||(i[n]={});if(r[e])return r[e];var u=[],s={re:o()(e,u,t),keys:u};return a<1e4&&(r[e]=s,a++),s}(u,{end:c,strict:f,sensitive:p}),m=h.re,v=h.keys,y=m.exec(e);if(!y)return null;var g=y[0],_=y.slice(1),b=e===g;return c&&!b?null:{path:u,url:"/"===u&&""===g?"/":g,isExact:b,params:v.reduce(function(e,t,n){return e[t.name]=_[n],e},{})}}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(126));t.default=function(e,t,n){return t in e?(0,r.default)(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=t&&Number(t.weekStartsOn)||0,o=r(e),i=o.getDay(),a=(i<n?7:0)+i-n;return 
o.setDate(o.getDate()-a),o.setHours(0,0,0,0),o}},function(e,t,n){var r=n(31),o=6e4,i=864e5;e.exports=function(e,t){var n=r(e),a=r(t),u=n.getTime()-n.getTimezoneOffset()*o,s=a.getTime()-a.getTimezoneOffset()*o;return Math.round((u-s)/i)}},function(e,t,n){var r=n(0),o=n(100);e.exports=function(e,t){var n=r(e),i=Number(t),a=n.getMonth()+i,u=new Date(0);u.setFullYear(n.getFullYear(),a,1),u.setHours(0,0,0,0);var s=o(u);return n.setMonth(a,Math.min(s,n.getDate())),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()-o.getTime()}},function(e,t,n){"use strict";(function(t){!t.version||0===t.version.indexOf("v0.")||0===t.version.indexOf("v1.")&&0!==t.version.indexOf("v1.8.")?e.exports={nextTick:function(e,n,r,o){if("function"!=typeof e)throw new TypeError('"callback" argument must be a function');var i,a,u=arguments.length;switch(u){case 0:case 1:return t.nextTick(e);case 2:return t.nextTick(function(){e.call(null,n)});case 3:return t.nextTick(function(){e.call(null,n,r)});case 4:return t.nextTick(function(){e.call(null,n,r,o)});default:for(i=new Array(u-1),a=0;a<i.length;)i[a++]=arguments[a];return t.nextTick(function(){e.apply(null,i)})}}}:e.exports=t}).call(t,n(40))},function(e,t,n){var r=n(204),o=r.Buffer;function i(e,t){for(var n in e)t[n]=e[n]}function a(e,t,n){return o(e,t,n)}o.from&&o.alloc&&o.allocUnsafe&&o.allocUnsafeSlow?e.exports=r:(i(r,t),t.Buffer=a),i(o,a),a.from=function(e,t,n){if("number"==typeof e)throw new TypeError("Argument must not be a number");return o(e,t,n)},a.alloc=function(e,t,n){if("number"!=typeof e)throw new TypeError("Argument must be a number");var r=o(e);return void 0!==t?"string"==typeof n?r.fill(t,n):r.fill(t):r.fill(0),r},a.allocUnsafe=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return o(e)},a.allocUnsafeSlow=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return r.SlowBuffer(e)}},function(e,t,n){"use strict";t.a=function(e){"undefined"!=typeof console&&"function"==typeof console.error&&console.error(e);try{throw new Error(e)}catch(e){}}},function(e,t){e.exports=function(e){if(void 0==e)throw TypeError("Can't call method on "+e);return e}},function(e,t,n){var r=n(75)("keys"),o=n(55);e.exports=function(e){return r[e]||(r[e]=o(e))}},function(e,t,n){var r=n(10),o=n(13),i=o["__core-js_shared__"]||(o["__core-js_shared__"]={});(e.exports=function(e,t){return i[e]||(i[e]=void 0!==t?t:{})})("versions",[]).push({version:r.version,mode:n(42)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(e,t,n){var r=n(23),o=n(13).document,i=r(o)&&r(o.createElement);e.exports=function(e){return i?o.createElement(e):{}}},function(e,t,n){var r=n(23);e.exports=function(e,t){if(!r(e))return e;var n,o;if(t&&"function"==typeof(n=e.toString)&&!r(o=n.call(e)))return o;if("function"==typeof(n=e.valueOf)&&!r(o=n.call(e)))return o;if(!t&&"function"==typeof(n=e.toString)&&!r(o=n.call(e)))return o;throw TypeError("Can't convert object to primitive value")}},function(e,t,n){"use strict";t.__esModule=!0;var r=a(n(247)),o=a(n(257)),i="function"==typeof o.default&&"symbol"==typeof r.default?function(e){return typeof e}:function(e){return e&&"function"==typeof o.default&&e.constructor===o.default&&e!==o.default.prototype?"symbol":typeof e};function a(e){return e&&e.__esModule?e:{default:e}}t.default="function"==typeof o.default&&"symbol"===i(r.default)?function(e){return void 0===e?"undefined":i(e)}:function(e){return e&&"function"==typeof 
o.default&&e.constructor===o.default&&e!==o.default.prototype?"symbol":void 0===e?"undefined":i(e)}},function(e,t){var n=Math.ceil,r=Math.floor;e.exports=function(e){return isNaN(e=+e)?0:(e>0?r:n)(e)}},function(e,t,n){var r=n(20),o=n(251),i=n(82),a=n(74)("IE_PROTO"),u=function(){},s=function(){var e,t=n(76)("iframe"),r=i.length;for(t.style.display="none",n(131).appendChild(t),t.src="javascript:",(e=t.contentWindow.document).open(),e.write("<script>document.F=Object<\/script>"),e.close(),s=e.F;r--;)delete s.prototype[i[r]];return s()};e.exports=Object.create||function(e,t){var n;return null!==e?(u.prototype=r(e),n=new u,u.prototype=null,n[a]=e):n=s(),void 0===t?n:o(n,t)}},function(e,t,n){var r=n(79),o=Math.min;e.exports=function(e){return e>0?o(r(e),9007199254740991):0}},function(e,t){e.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(e,t,n){t.f=n(14)},function(e,t,n){var r=n(13),o=n(10),i=n(42),a=n(83),u=n(22).f;e.exports=function(e){var t=o.Symbol||(o.Symbol=i?{}:r.Symbol||{});"_"==e.charAt(0)||e in t||u(t,e,{value:a.f(e)})}},function(e,t){t.f=Object.getOwnPropertySymbols},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(136);n.d(t,"MemoryRouter",function(){return r.a});var o=n(138);n.d(t,"Prompt",function(){return o.a});var i=n(139);n.d(t,"Redirect",function(){return i.a});var a=n(89);n.d(t,"Route",function(){return a.a});var u=n(63);n.d(t,"Router",function(){return u.a});var s=n(141);n.d(t,"StaticRouter",function(){return s.a});var c=n(142);n.d(t,"Switch",function(){return c.a});var l=n(88);n.d(t,"generatePath",function(){return l.a});var f=n(64);n.d(t,"matchPath",function(){return f.a});var d=n(143);n.d(t,"withRouter",function(){return d.a})},function(e,t,n){"use strict";var r=n(61),o=n.n(r);t.a=function(){var e=null,t=[];return{setPrompt:function(t){return o()(null==e,"A history supports only one prompt at a time"),e=t,function(){e===t&&(e=null)}},confirmTransitionTo:function(t,n,r,i){if(null!=e){var a="function"==typeof e?e(t,n):e;"string"==typeof a?"function"==typeof r?r(a,i):(o()(!1,"A history needs a getUserConfirmation function in order to use a prompt message"),i(!0)):i(!1!==a)}else i(!0)},appendListener:function(e){var n=!0,r=function(){n&&e.apply(void 0,arguments)};return t.push(r),function(){n=!1,t=t.filter(function(e){return e!==r})}},notifyListeners:function(){for(var e=arguments.length,n=Array(e),r=0;r<e;r++)n[r]=arguments[r];t.forEach(function(e){return e.apply(void 0,n)})}}}},function(e,t,n){"use strict";var r=n(140),o=n.n(r),i={},a=0;t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"/",t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return"/"===e?e:function(e){var t=e,n=i[t]||(i[t]={});if(n[e])return n[e];var r=o.a.compile(e);return a<1e4&&(n[e]=r,a++),r}(e)(t,{pretty:!0})}},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=n.n(u),c=n(12),l=n.n(c),f=n(64),d=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function p(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var h=function(e){return 0===s.a.Children.count(e)},m=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var 
o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=p(this,e.call.apply(e,[this].concat(i))),r.state={match:r.computeMatch(r.props,r.context.router)},p(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.getChildContext=function(){return{router:d({},this.context.router,{route:{location:this.props.location||this.context.router.route.location,match:this.state.match}})}},t.prototype.computeMatch=function(e,t){var n=e.computedMatch,r=e.location,o=e.path,i=e.strict,u=e.exact,s=e.sensitive;if(n)return n;a()(t,"You should not use <Route> or withRouter() outside a <Router>");var c=t.route,l=(r||c.location).pathname;return Object(f.a)(l,{path:o,strict:i,exact:u,sensitive:s},c.match)},t.prototype.componentWillMount=function(){o()(!(this.props.component&&this.props.render),"You should not use <Route component> and <Route render> in the same route; <Route render> will be ignored"),o()(!(this.props.component&&this.props.children&&!h(this.props.children)),"You should not use <Route component> and <Route children> in the same route; <Route children> will be ignored"),o()(!(this.props.render&&this.props.children&&!h(this.props.children)),"You should not use <Route render> and <Route children> in the same route; <Route children> will be ignored")},t.prototype.componentWillReceiveProps=function(e,t){o()(!(e.location&&!this.props.location),'<Route> elements should not change from uncontrolled to controlled (or vice versa). You initially used no "location" prop and then provided one on a subsequent render.'),o()(!(!e.location&&this.props.location),'<Route> elements should not change from controlled to uncontrolled (or vice versa). 
You provided a "location" prop initially but omitted it on a subsequent render.'),this.setState({match:this.computeMatch(e,t.router)})},t.prototype.render=function(){var e=this.state.match,t=this.props,n=t.children,r=t.component,o=t.render,i=this.context.router,a=i.history,u=i.route,c=i.staticContext,l={match:e,location:this.props.location||u.location,history:a,staticContext:c};return r?e?s.a.createElement(r,l):null:o?e?o(l):null:"function"==typeof n?n(l):n&&!h(n)?s.a.Children.only(n):null},t}(s.a.Component);m.propTypes={computedMatch:l.a.object,path:l.a.string,exact:l.a.bool,strict:l.a.bool,sensitive:l.a.bool,component:l.a.func,render:l.a.func,children:l.a.oneOfType([l.a.func,l.a.node]),location:l.a.object},m.contextTypes={router:l.a.shape({history:l.a.object.isRequired,route:l.a.object.isRequired,staticContext:l.a.object})},m.childContextTypes={router:l.a.object.isRequired},t.a=m},function(e,t,n){"use strict";e.exports=n(288)},function(e,t,n){"use strict";t.a=function(e){"undefined"!=typeof console&&"function"==typeof console.error&&console.error(e);try{throw new Error(e)}catch(e){}}},function(e,t,n){"use strict";var r=n(63);t.a=r.a},function(e,t,n){e.exports={default:n(333),__esModule:!0}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.history=t.store=void 0;var r=n(15),o=n(135),i=n(27),a=f(n(345)),u=n(17),s=f(n(346)),c=f(n(347)),l=f(n(353));function f(e){return e&&e.__esModule?e:{default:e}}var d=(0,r.combineReducers)({auth:function(){return arguments.length>0&&void 0!==arguments[0]?arguments[0]:(0,u.login)()},metadata:s.default,search:c.default,review:l.default});var p=(0,i.createBrowserHistory)(),h=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1],n=window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__||r.compose;return(0,r.createStore)((0,o.connectRouter)(t)(d),e,n((0,r.applyMiddleware)(a.default,(0,o.routerMiddleware)(t))))}({},p);t.store=h,t.history=p},function(e,t,n){var r=n(96),o=n(14)("iterator"),i=n(36);e.exports=n(10).getIteratorMethod=function(e){if(void 0!=e)return e[o]||e["@@iterator"]||i[r(e)]}},function(e,t,n){var r=n(46),o=n(14)("toStringTag"),i="Arguments"==r(function(){return arguments}());e.exports=function(e){var t,n,a;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=function(e,t){try{return e[t]}catch(e){}}(t=Object(e),o))?n:i?r(t):"Object"==(a=r(t))&&"function"==typeof t.callee?"Arguments":a}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.create=t.dedupe=t.exportCSV=t.clear=t.refresh=t.toggleSaved=t.unsave=t.save=void 0;var r=f(n(365)),o=f(n(28)),i=n(165),a=f(n(478)),u=f(n(496)),s=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39)),c=n(17),l=n(159);function f(e){return e&&e.__esModule?e:{default:e}}var d=function(){return"/api/images/import/new/"},p=function(e){return{type:s.metadata.loading,tag:e}},h=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return{type:s.metadata.loaded,tag:e,data:t}},m=function(e,t){return{type:s.metadata.error,tag:e,err:t}},v=t.save=function(e){return function(t){console.log("save",e);var n=(0,l.getSavedFromStore)(),r=n[e.hash]||{frames:{},hash:e.hash,verified:e.verified};e.frame&&(r.frames[parseInt(e.frame,10)]=!0),r.verified=e.verified,n[e.hash]=r,t({type:s.review.save,saved:n}),t({type:s.review.dedupe,payload:!1})}},y=t.unsave=function(e){return function(t){console.log("unsave",e);var 
n=(0,l.getSavedFromStore)(),r=n[e.hash];r&&e.frame&&r.frames[parseInt(e.frame,10)]&&(r.frames[parseInt(e.frame,10)]=!1),t({type:s.review.unsave,saved:n})}};t.toggleSaved=function(e){return function(t){var n=e.hash,r=e.frame,o=(0,l.getSavedFromStore)(),i=o[n],a=!1;console.log(o,i),i&&r&&i.frames&&i.frames[parseInt(r,10)]&&(a=i.frames[parseInt(r,10)]),console.log(a),a?y(e)(t):v(e)(t)}},t.refresh=function(){return function(e){var t=(0,l.getSavedFromStore)();(0,o.default)(t).forEach(function(e){var n=t[e],r=0,i=(0,o.default)(n.frames);i.forEach(function(e){n.frames[e]||(delete n.frames[e],r+=1)}),i.length&&i.length!==r||delete t[e]}),e({type:s.review.refresh,saved:t})}},t.clear=function(){return function(e){e({type:s.review.clear})}},t.exportCSV=function(){return function(e){console.log("export CSV");var t=(0,l.getSavedFromStore)(),n=(0,o.default)(t).sort().map(function(e){var n=t[e],r=n.verified,i=n.hash,a=n.frames;return[i,(0,o.default)(a).join(", "),(0,c.verify)(r)]});(0,a.default)(n,function(t,r){var o=new Blob([r],{type:"text/csv"});(0,u.default)(o,"vsearch_investigation_"+(0,i.format)(new Date,"YYYYMMDD_HHmm")+".csv"),e(h("csv",{count:n.length}))})}},t.dedupe=function(){return function(e){return e(p("dedupe")),new r.default(function(t,n){var r=(0,l.getSavedUrls)();(0,c.post)("/api/images/import/search/",{urls:r}).then(function(n){var r=n.good,o=n.bad,i=(0,l.getSavedFromStore)();o.forEach(function(e){var t=e.image;console.log(t);var n=t.sa_hash,r=t.frame,o=parseInt(r,10);i[n]&&i[n].frames[o]&&(i[n].frames[o]=!1,1)}),e({type:s.review.save,saved:i}),e({type:s.review.dedupe,payload:!0}),t(r,o)}).catch(function(t){e({type:s.review.dedupe,payload:!1}),n(t)})})}},t.create=function(e){var t=e.title,n=e.graphic;return function(e){var r=(0,l.getSavedUrls)();return t?r?(e(p("create")),(0,c.post)(d(),{title:t,graphic:n,urls:r}).then(function(t){e(h("create")),window.location.href="/groups/show/"+t.image_group.id}).catch(function(t){e(m("create")),console.log(t)})):e(m("create","No images to save")):e(m("create","No title"))}}},function(e,t,n){"use strict";var r=n(56);e.exports.f=function(e){return new function(e){var t,n;this.promise=new e(function(e,r){if(void 0!==t||void 0!==n)throw TypeError("Bad Promise constructor");t=e,n=r}),this.resolve=r(t),this.reject=r(n)}(e)}},function(e,t){e.exports=function(e){return e instanceof Date}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getFullYear(),o=t.getMonth(),i=new Date(0);return i.setFullYear(n,o+1,0),i.setHours(0,0,0,0),i.getDate()}},function(e,t,n){var r=n(50);e.exports=function(e,t){var n=Number(t);return r(e,7*n)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e).getTime(),o=r(t).getTime();return n>o?-1:n<o?1:0}},function(e,t,n){var r=n(0),o=n(174),i=n(53);e.exports=function(e,t){var n=r(e),a=r(t),u=i(n,a),s=Math.abs(o(n,a));return n.setMonth(n.getMonth()-u*s),u*(s-(i(n,a)===-u))}},function(e,t,n){var r=n(69);e.exports=function(e,t){var n=r(e,t)/1e3;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(389),o=n(390);e.exports={distanceInWords:r(),format:o()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setHours(23,59,59,999),t}},function(e,t,n){var r=n(0),o=n(30),i=n(52),a=6048e5;e.exports=function(e){var t=r(e),n=o(t).getTime()-i(t).getTime();return Math.round(n/a)+1}},function(e,t,n){var r=n(66);e.exports=function(e,t,n){var o=r(e,n),i=r(t,n);return o.getTime()===i.getTime()}},function(e,t){function n(){this._events=this._events||{},this._maxListeners=this._maxListeners||void 
0}function r(e){return"function"==typeof e}function o(e){return"object"==typeof e&&null!==e}function i(e){return void 0===e}e.exports=n,n.EventEmitter=n,n.prototype._events=void 0,n.prototype._maxListeners=void 0,n.defaultMaxListeners=10,n.prototype.setMaxListeners=function(e){if(!function(e){return"number"==typeof e}(e)||e<0||isNaN(e))throw TypeError("n must be a positive number");return this._maxListeners=e,this},n.prototype.emit=function(e){var t,n,a,u,s,c;if(this._events||(this._events={}),"error"===e&&(!this._events.error||o(this._events.error)&&!this._events.error.length)){if((t=arguments[1])instanceof Error)throw t;var l=new Error('Uncaught, unspecified "error" event. ('+t+")");throw l.context=t,l}if(i(n=this._events[e]))return!1;if(r(n))switch(arguments.length){case 1:n.call(this);break;case 2:n.call(this,arguments[1]);break;case 3:n.call(this,arguments[1],arguments[2]);break;default:u=Array.prototype.slice.call(arguments,1),n.apply(this,u)}else if(o(n))for(u=Array.prototype.slice.call(arguments,1),a=(c=n.slice()).length,s=0;s<a;s++)c[s].apply(this,u);return!0},n.prototype.addListener=function(e,t){var a;if(!r(t))throw TypeError("listener must be a function");return this._events||(this._events={}),this._events.newListener&&this.emit("newListener",e,r(t.listener)?t.listener:t),this._events[e]?o(this._events[e])?this._events[e].push(t):this._events[e]=[this._events[e],t]:this._events[e]=t,o(this._events[e])&&!this._events[e].warned&&(a=i(this._maxListeners)?n.defaultMaxListeners:this._maxListeners)&&a>0&&this._events[e].length>a&&(this._events[e].warned=!0,console.error("(node) warning: possible EventEmitter memory leak detected. %d listeners added. Use emitter.setMaxListeners() to increase limit.",this._events[e].length),"function"==typeof console.trace&&console.trace()),this},n.prototype.on=n.prototype.addListener,n.prototype.once=function(e,t){if(!r(t))throw TypeError("listener must be a function");var n=!1;function o(){this.removeListener(e,o),n||(n=!0,t.apply(this,arguments))}return o.listener=t,this.on(e,o),this},n.prototype.removeListener=function(e,t){var n,i,a,u;if(!r(t))throw TypeError("listener must be a function");if(!this._events||!this._events[e])return this;if(a=(n=this._events[e]).length,i=-1,n===t||r(n.listener)&&n.listener===t)delete this._events[e],this._events.removeListener&&this.emit("removeListener",e,t);else if(o(n)){for(u=a;u-- >0;)if(n[u]===t||n[u].listener&&n[u].listener===t){i=u;break}if(i<0)return this;1===n.length?(n.length=0,delete this._events[e]):n.splice(i,1),this._events.removeListener&&this.emit("removeListener",e,t)}return this},n.prototype.removeAllListeners=function(e){var t,n;if(!this._events)return this;if(!this._events.removeListener)return 0===arguments.length?this._events={}:this._events[e]&&delete this._events[e],this;if(0===arguments.length){for(t in this._events)"removeListener"!==t&&this.removeAllListeners(t);return this.removeAllListeners("removeListener"),this._events={},this}if(r(n=this._events[e]))this.removeListener(e,n);else if(n)for(;n.length;)this.removeListener(e,n[n.length-1]);return delete this._events[e],this},n.prototype.listeners=function(e){return this._events&&this._events[e]?r(this._events[e])?[this._events[e]]:this._events[e].slice():[]},n.prototype.listenerCount=function(e){if(this._events){var t=this._events[e];if(r(t))return 1;if(t)return t.length}return 0},n.listenerCount=function(e,t){return 
e.listenerCount(t)}},function(e,t,n){(t=e.exports=n(201)).Stream=t,t.Readable=t,t.Writable=n(111),t.Duplex=n(33),t.Transform=n(207),t.PassThrough=n(488)},function(e,t,n){"use strict";(function(t,r,o){var i=n(70);function a(e){var t=this;this.next=null,this.entry=null,this.finish=function(){!function(e,t,n){var r=e.entry;e.entry=null;for(;r;){var o=r.callback;t.pendingcb--,o(n),r=r.next}t.corkedRequestsFree?t.corkedRequestsFree.next=e:t.corkedRequestsFree=e}(t,e)}}e.exports=g;var u,s=!t.browser&&["v0.10","v0.9."].indexOf(t.version.slice(0,5))>-1?r:i.nextTick;g.WritableState=y;var c=n(54);c.inherits=n(32);var l={deprecate:n(487)},f=n(203),d=n(71).Buffer,p=o.Uint8Array||function(){};var h,m=n(205);function v(){}function y(e,t){u=u||n(33),e=e||{};var r=t instanceof u;this.objectMode=!!e.objectMode,r&&(this.objectMode=this.objectMode||!!e.writableObjectMode);var o=e.highWaterMark,c=e.writableHighWaterMark,l=this.objectMode?16:16384;this.highWaterMark=o||0===o?o:r&&(c||0===c)?c:l,this.highWaterMark=Math.floor(this.highWaterMark),this.finalCalled=!1,this.needDrain=!1,this.ending=!1,this.ended=!1,this.finished=!1,this.destroyed=!1;var f=!1===e.decodeStrings;this.decodeStrings=!f,this.defaultEncoding=e.defaultEncoding||"utf8",this.length=0,this.writing=!1,this.corked=0,this.sync=!0,this.bufferProcessing=!1,this.onwrite=function(e){!function(e,t){var n=e._writableState,r=n.sync,o=n.writecb;if(function(e){e.writing=!1,e.writecb=null,e.length-=e.writelen,e.writelen=0}(n),t)!function(e,t,n,r,o){--t.pendingcb,n?(i.nextTick(o,r),i.nextTick(O,e,t),e._writableState.errorEmitted=!0,e.emit("error",r)):(o(r),e._writableState.errorEmitted=!0,e.emit("error",r),O(e,t))}(e,n,r,t,o);else{var a=x(n);a||n.corked||n.bufferProcessing||!n.bufferedRequest||w(e,n),r?s(b,e,n,a,o):b(e,n,a,o)}}(t,e)},this.writecb=null,this.writelen=0,this.bufferedRequest=null,this.lastBufferedRequest=null,this.pendingcb=0,this.prefinished=!1,this.errorEmitted=!1,this.bufferedRequestCount=0,this.corkedRequestsFree=new a(this)}function g(e){if(u=u||n(33),!(h.call(g,this)||this instanceof u))return new g(e);this._writableState=new y(e,this),this.writable=!0,e&&("function"==typeof e.write&&(this._write=e.write),"function"==typeof e.writev&&(this._writev=e.writev),"function"==typeof e.destroy&&(this._destroy=e.destroy),"function"==typeof e.final&&(this._final=e.final)),f.call(this)}function _(e,t,n,r,o,i,a){t.writelen=r,t.writecb=a,t.writing=!0,t.sync=!0,n?e._writev(o,t.onwrite):e._write(o,i,t.onwrite),t.sync=!1}function b(e,t,n,r){n||function(e,t){0===t.length&&t.needDrain&&(t.needDrain=!1,e.emit("drain"))}(e,t),t.pendingcb--,r(),O(e,t)}function w(e,t){t.bufferProcessing=!0;var n=t.bufferedRequest;if(e._writev&&n&&n.next){var r=t.bufferedRequestCount,o=new Array(r),i=t.corkedRequestsFree;i.entry=n;for(var u=0,s=!0;n;)o[u]=n,n.isBuf||(s=!1),n=n.next,u+=1;o.allBuffers=s,_(e,t,!0,t.length,o,"",i.finish),t.pendingcb++,t.lastBufferedRequest=null,i.next?(t.corkedRequestsFree=i.next,i.next=null):t.corkedRequestsFree=new a(t),t.bufferedRequestCount=0}else{for(;n;){var c=n.chunk,l=n.encoding,f=n.callback;if(_(e,t,!1,t.objectMode?1:c.length,c,l,f),n=n.next,t.bufferedRequestCount--,t.writing)break}null===n&&(t.lastBufferedRequest=null)}t.bufferedRequest=n,t.bufferProcessing=!1}function x(e){return e.ending&&0===e.length&&null===e.bufferedRequest&&!e.finished&&!e.writing}function E(e,t){e._final(function(n){t.pendingcb--,n&&e.emit("error",n),t.prefinished=!0,e.emit("prefinish"),O(e,t)})}function O(e,t){var n=x(t);return 
n&&(!function(e,t){t.prefinished||t.finalCalled||("function"==typeof e._final?(t.pendingcb++,t.finalCalled=!0,i.nextTick(E,e,t)):(t.prefinished=!0,e.emit("prefinish")))}(e,t),0===t.pendingcb&&(t.finished=!0,e.emit("finish"))),n}c.inherits(g,f),y.prototype.getBuffer=function(){for(var e=this.bufferedRequest,t=[];e;)t.push(e),e=e.next;return t},function(){try{Object.defineProperty(y.prototype,"buffer",{get:l.deprecate(function(){return this.getBuffer()},"_writableState.buffer is deprecated. Use _writableState.getBuffer instead.","DEP0003")})}catch(e){}}(),"function"==typeof Symbol&&Symbol.hasInstance&&"function"==typeof Function.prototype[Symbol.hasInstance]?(h=Function.prototype[Symbol.hasInstance],Object.defineProperty(g,Symbol.hasInstance,{value:function(e){return!!h.call(this,e)||this===g&&(e&&e._writableState instanceof y)}})):h=function(e){return e instanceof this},g.prototype.pipe=function(){this.emit("error",new Error("Cannot pipe, not readable"))},g.prototype.write=function(e,t,n){var r=this._writableState,o=!1,a=!r.objectMode&&function(e){return d.isBuffer(e)||e instanceof p}(e);return a&&!d.isBuffer(e)&&(e=function(e){return d.from(e)}(e)),"function"==typeof t&&(n=t,t=null),a?t="buffer":t||(t=r.defaultEncoding),"function"!=typeof n&&(n=v),r.ended?function(e,t){var n=new Error("write after end");e.emit("error",n),i.nextTick(t,n)}(this,n):(a||function(e,t,n,r){var o=!0,a=!1;return null===n?a=new TypeError("May not write null values to stream"):"string"==typeof n||void 0===n||t.objectMode||(a=new TypeError("Invalid non-string/buffer chunk")),a&&(e.emit("error",a),i.nextTick(r,a),o=!1),o}(this,r,e,n))&&(r.pendingcb++,o=function(e,t,n,r,o,i){if(!n){var a=function(e,t,n){e.objectMode||!1===e.decodeStrings||"string"!=typeof t||(t=d.from(t,n));return t}(t,r,o);r!==a&&(n=!0,o="buffer",r=a)}var u=t.objectMode?1:r.length;t.length+=u;var s=t.length<t.highWaterMark;s||(t.needDrain=!0);if(t.writing||t.corked){var c=t.lastBufferedRequest;t.lastBufferedRequest={chunk:r,encoding:o,isBuf:n,callback:i,next:null},c?c.next=t.lastBufferedRequest:t.bufferedRequest=t.lastBufferedRequest,t.bufferedRequestCount+=1}else _(e,t,!1,u,r,o,i);return s}(this,r,a,e,t,n)),o},g.prototype.cork=function(){this._writableState.corked++},g.prototype.uncork=function(){var e=this._writableState;e.corked&&(e.corked--,e.writing||e.corked||e.finished||e.bufferProcessing||!e.bufferedRequest||w(this,e))},g.prototype.setDefaultEncoding=function(e){if("string"==typeof e&&(e=e.toLowerCase()),!(["hex","utf8","utf-8","ascii","binary","base64","ucs2","ucs-2","utf16le","utf-16le","raw"].indexOf((e+"").toLowerCase())>-1))throw new TypeError("Unknown encoding: "+e);return this._writableState.defaultEncoding=e,this},Object.defineProperty(g.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),g.prototype._write=function(e,t,n){n(new Error("_write() is not implemented"))},g.prototype._writev=null,g.prototype.end=function(e,t,n){var r=this._writableState;"function"==typeof e?(n=e,e=null,t=null):"function"==typeof t&&(n=t,t=null),null!==e&&void 0!==e&&this.write(e,t),r.corked&&(r.corked=1,this.uncork()),r.ending||r.finished||function(e,t,n){t.ending=!0,O(e,t),n&&(t.finished?i.nextTick(n):e.once("finish",n));t.ended=!0,e.writable=!1}(this,r,n)},Object.defineProperty(g.prototype,"destroyed",{get:function(){return void 
0!==this._writableState&&this._writableState.destroyed},set:function(e){this._writableState&&(this._writableState.destroyed=e)}}),g.prototype.destroy=m.destroy,g.prototype._undestroy=m.undestroy,g.prototype._destroy=function(e,t){this.end(),t(e)}}).call(t,n(40),n(485).setImmediate,n(3))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.fetchMetadata=t.fetchMediaRecord=t.dispatchFetch=t.setHash=void 0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(503)),o=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39));var i={mediaRecord:function(e){return"/search/api/mediarecord/"+e},metadata:function(e){return"/api/metadata/"+e+"/"}},a=function(e,t){return{type:o.metadata.loading,tag:e,hash:t}},u=function(e,t,n){return{type:o.metadata.error,tag:e,hash:t,err:n}},s=(t.setHash=function(e){return function(t){t({type:o.metadata.set_hash,hash:e})}},t.dispatchFetch=function(e,t){return function(n){n(a(e,t)),function(e){return fetch(e,{}).then(function(e){return e.json()})}(i[e](t)).then(function(r){return n(function(e,t,n){return{type:o.metadata.loaded,tag:e,hash:t,data:n}}(e,t,r))}).catch(function(r){return n(u(e,t,r))})}});t.fetchMediaRecord=function(e){return function(t){return s("mediaRecord",e)(t)}},t.fetchMetadata=function(e){return function(t){t(a("metadata",e)),(0,r.default)(i.metadata(e)).then(function(e){return e.json()}).then(function(n){return t(function(e,t,n){return{type:o.metadata.loaded_many,tag:e,hash:t,data:n}}("metadata",e,n))}).catch(function(n){return t(u("metadata",e,n))})}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.places365=t.coco=void 0;var r=i(n(509)),o=i(n(510));function i(e){return e&&e.__esModule?e:{default:e}}t.coco=r.default,t.places365=o.default},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=y(n(4)),o=y(n(5)),i=y(n(6)),a=y(n(7)),u=y(n(8)),s=y(n(9)),c=n(1),l=y(c),f=n(16),d=n(15),p=n(2),h=n(165),m=n(17),v=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21));function y(e){return e&&e.__esModule?e:{default:e}}var g=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.query,n=e.metadata,r=e.sugarcube;if(!t||!n||!n.mediainfo||"loading"===n.metadata)return l.default.createElement("div",{className:"searchMeta"});var o=n.mediainfo.sugarcube_id,i=n.mediainfo.metadata.mediainfo.video,a=t.crop||{},u=a.x,s=a.y,c=a.w,d=a.h;return l.default.createElement("div",{className:"searchMeta"},"verified"in t&&l.default.createElement("span",{className:t.verified?"verified":"unverified"},t.verified?"verified":"unverified"),t.hash&&l.default.createElement("span",null,"sha256: ",l.default.createElement(f.Link,{className:"sha256",to:v.publicUrl.browse(t.hash)},t.hash)),t.frame&&l.default.createElement("span",null,"Frame: ",(0,m.timestamp)(t.frame,i.frame_rate)," / ",(0,m.timestamp)(i.duration/1e3,1)),t.crop&&l.default.createElement("span",null,"Crop: ",parseInt(c,10)+"x"+parseInt(d,10)+" @ ("+parseInt(u,10)+", "+parseInt(s,10)+")"),!(!i||!i.encoded_date)&&l.default.createElement("span",null,"Date: ",(0,h.format)(new Date(i.encoded_date),"DD-MMM-YYYY")),!(!r||!o)&&l.default.createElement("span",null,"sugarcube: 
",o))}}]),t}(c.Component);t.default=(0,p.connect)(function(e){return{metadata:e.metadata}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},v),e)}})(g)},function(e,t,n){"use strict"; -/* -object-assign -(c) Sindre Sorhus -@license MIT -*/var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,a,u=function(e){if(null===e||void 0===e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}(e),s=1;s<arguments.length;s++){for(var c in n=Object(arguments[s]))o.call(n,c)&&(u[c]=n[c]);if(r){a=r(n);for(var l=0;l<a.length;l++)i.call(n,a[l])&&(u[a[l]]=n[a[l]])}}return u}},function(e,t,n){"use strict";n.d(t,"b",function(){return i}),n.d(t,"a",function(){return a});var r=n(12),o=n.n(r),i=o.a.shape({trySubscribe:o.a.func.isRequired,tryUnsubscribe:o.a.func.isRequired,notifyNestedSubs:o.a.func.isRequired,isSubscribed:o.a.func.isRequired}),a=o.a.shape({subscribe:o.a.func.isRequired,dispatch:o.a.func.isRequired,getState:o.a.func.isRequired})},function(e,t,n){"use strict";t.a=function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=r.getDisplayName,h=void 0===i?function(e){return"ConnectAdvanced("+e+")"}:i,m=r.methodName,v=void 0===m?"connectAdvanced":m,y=r.renderCountProp,g=void 0===y?void 0:y,_=r.shouldHandleStateChanges,b=void 0===_||_,w=r.storeKey,x=void 0===w?"store":w,E=r.withRef,O=void 0!==E&&E,S=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(r,["getDisplayName","methodName","renderCountProp","shouldHandleStateChanges","storeKey","withRef"]),T=x+"Subscription",k=f++,R=((t={})[x]=c.a,t[T]=c.b,t),j=((n={})[T]=c.b,n);return function(t){a()("function"==typeof t,"You must pass a component to the function returned by "+v+". Instead received "+JSON.stringify(t));var n=t.displayName||t.name||"Component",r=h(n),i=l({},S,{getDisplayName:h,methodName:v,renderCountProp:g,shouldHandleStateChanges:b,storeKey:x,withRef:O,displayName:r,wrappedComponentName:n,WrappedComponent:t}),c=function(n){function o(e,t){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,n.call(this,e,t));return i.version=k,i.state={},i.renderCount=0,i.store=e[x]||t[x],i.propsMode=Boolean(e[x]),i.setWrappedInstance=i.setWrappedInstance.bind(i),a()(i.store,'Could not find "'+x+'" in either the context or props of "'+r+'". 
Either wrap the root component in a <Provider>, or explicitly pass "'+x+'" as a prop to "'+r+'".'),i.initSelector(),i.initSubscription(),i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,n),o.prototype.getChildContext=function(){var e,t=this.propsMode?null:this.subscription;return(e={})[T]=t||this.context[T],e},o.prototype.componentDidMount=function(){b&&(this.subscription.trySubscribe(),this.selector.run(this.props),this.selector.shouldComponentUpdate&&this.forceUpdate())},o.prototype.componentWillReceiveProps=function(e){this.selector.run(e)},o.prototype.shouldComponentUpdate=function(){return this.selector.shouldComponentUpdate},o.prototype.componentWillUnmount=function(){this.subscription&&this.subscription.tryUnsubscribe(),this.subscription=null,this.notifyNestedSubs=p,this.store=null,this.selector.run=p,this.selector.shouldComponentUpdate=!1},o.prototype.getWrappedInstance=function(){return a()(O,"To access the wrapped instance, you need to specify { withRef: true } in the options argument of the "+v+"() call."),this.wrappedInstance},o.prototype.setWrappedInstance=function(e){this.wrappedInstance=e},o.prototype.initSelector=function(){var t=e(this.store.dispatch,i);this.selector=function(e,t){var n={run:function(r){try{var o=e(t.getState(),r);(o!==n.props||n.error)&&(n.shouldComponentUpdate=!0,n.props=o,n.error=null)}catch(e){n.shouldComponentUpdate=!0,n.error=e}}};return n}(t,this.store),this.selector.run(this.props)},o.prototype.initSubscription=function(){if(b){var e=(this.propsMode?this.props:this.context)[T];this.subscription=new s.a(this.store,e,this.onStateChange.bind(this)),this.notifyNestedSubs=this.subscription.notifyNestedSubs.bind(this.subscription)}},o.prototype.onStateChange=function(){this.selector.run(this.props),this.selector.shouldComponentUpdate?(this.componentDidUpdate=this.notifyNestedSubsOnComponentDidUpdate,this.setState(d)):this.notifyNestedSubs()},o.prototype.notifyNestedSubsOnComponentDidUpdate=function(){this.componentDidUpdate=void 0,this.notifyNestedSubs()},o.prototype.isSubscribed=function(){return Boolean(this.subscription)&&this.subscription.isSubscribed()},o.prototype.addExtraProps=function(e){if(!(O||g||this.propsMode&&this.subscription))return e;var t=l({},e);return O&&(t.ref=this.setWrappedInstance),g&&(t[g]=this.renderCount++),this.propsMode&&this.subscription&&(t[T]=this.subscription),t},o.prototype.render=function(){var e=this.selector;if(e.shouldComponentUpdate=!1,e.error)throw e.error;return Object(u.createElement)(t,this.addExtraProps(e.props))},o}(u.Component);return c.WrappedComponent=t,c.displayName=r,c.childContextTypes=j,c.contextTypes=R,c.propTypes=R,o()(c,t)}};var r=n(118),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=(n.n(u),n(223)),c=n(116),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var f=0,d={};function p(){}},function(e,t,n){"use strict";var 
r={childContextTypes:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i=Object.defineProperty,a=Object.getOwnPropertyNames,u=Object.getOwnPropertySymbols,s=Object.getOwnPropertyDescriptor,c=Object.getPrototypeOf,l=c&&c(Object);e.exports=function e(t,n,f){if("string"!=typeof n){if(l){var d=c(n);d&&d!==l&&e(t,d,f)}var p=a(n);u&&(p=p.concat(u(n)));for(var h=0;h<p.length;++h){var m=p[h];if(!(r[m]||o[m]||f&&f[m])){var v=s(n,m);try{i(t,m,v)}catch(e){}}}return t}return t}},function(e,t){e.exports=function(e){if(!e.webpackPolyfill){var t=Object.create(e);t.children||(t.children=[]),Object.defineProperty(t,"loaded",{enumerable:!0,get:function(){return t.l}}),Object.defineProperty(t,"id",{enumerable:!0,get:function(){return t.i}}),Object.defineProperty(t,"exports",{enumerable:!0}),t.webpackPolyfill=1}return t}},function(e,t,n){"use strict";t.a=function(e){return function(t,n){var r=e(t,n);function o(){return r}return o.dependsOnOwnProps=!1,o}},t.b=function(e,t){return function(t,n){n.displayName;var o=function(e,t){return o.dependsOnOwnProps?o.mapToProps(e,t):o.mapToProps(e)};return o.dependsOnOwnProps=!0,o.mapToProps=function(t,n){o.mapToProps=e,o.dependsOnOwnProps=r(e);var i=o(t,n);return"function"==typeof i&&(o.mapToProps=i,o.dependsOnOwnProps=r(i),i=o(t,n)),i},o}};n(121);function r(e){return null!==e.dependsOnOwnProps&&void 0!==e.dependsOnOwnProps?Boolean(e.dependsOnOwnProps):1!==e.length}},function(e,t,n){"use strict";n(229),n(72)},function(e,t,n){"use strict";var r=n(231).a.Symbol;t.a=r},function(e,t,n){var r=n(25),o=n(41),i=n(74)("IE_PROTO"),a=Object.prototype;e.exports=Object.getPrototypeOf||function(e){return e=o(e),r(e,i)?e[i]:"function"==typeof e.constructor&&e instanceof e.constructor?e.constructor.prototype:e instanceof Object?a:null}},function(e,t,n){var r=n(18),o=n(10),i=n(35);e.exports=function(e,t){var n=(o.Object||{})[e]||Object[e],a={};a[e]=t(n),r(r.S+r.F*i(function(){n(1)}),"Object",a)}},function(e,t,n){e.exports=!n(24)&&!n(35)(function(){return 7!=Object.defineProperty(n(76)("div"),"a",{get:function(){return 7}}).a})},function(e,t,n){e.exports={default:n(245),__esModule:!0}},function(e,t,n){"use strict";var r=n(42),o=n(18),i=n(128),a=n(26),u=n(36),s=n(250),c=n(57),l=n(123),f=n(14)("iterator"),d=!([].keys&&"next"in[].keys()),p=function(){return this};e.exports=function(e,t,n,h,m,v,y){s(n,t,h);var g,_,b,w=function(e){if(!d&&e in S)return S[e];switch(e){case"keys":case"values":return function(){return new n(this,e)}}return function(){return new n(this,e)}},x=t+" Iterator",E="values"==m,O=!1,S=e.prototype,T=S[f]||S["@@iterator"]||m&&S[m],k=T||w(m),R=m?E?w("entries"):k:void 0,j="Array"==t&&S.entries||T;if(j&&(b=l(j.call(new e)))!==Object.prototype&&b.next&&(c(b,x,!0),r||"function"==typeof b[f]||a(b,f,p)),E&&T&&"values"!==T.name&&(O=!0,k=function(){return T.call(this)}),r&&!y||!d&&!O&&S[f]||a(S,f,k),u[t]=k,u[x]=p,m)if(g={values:E?k:w("values"),keys:v?k:w("keys"),entries:R},y)for(_ in g)_ in S||i(S,_,g[_]);else o(o.P+o.F*(d||O),t,g);return g}},function(e,t,n){e.exports=n(26)},function(e,t,n){var r=n(25),o=n(37),i=n(252)(!1),a=n(74)("IE_PROTO");e.exports=function(e,t){var n,u=o(e),s=0,c=[];for(n in u)n!=a&&r(u,n)&&c.push(n);for(;t.length>s;)r(u,n=t[s++])&&(~i(c,n)||c.push(n));return c}},function(e,t,n){var 
r=n(46);e.exports=Object("z").propertyIsEnumerable(0)?Object:function(e){return"String"==r(e)?e.split(""):Object(e)}},function(e,t,n){var r=n(13).document;e.exports=r&&r.documentElement},function(e,t,n){var r=n(129),o=n(82).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return r(e,o)}},function(e,t,n){var r=n(59),o=n(43),i=n(37),a=n(77),u=n(25),s=n(125),c=Object.getOwnPropertyDescriptor;t.f=n(24)?c:function(e,t){if(e=i(e),t=a(t,!0),s)try{return c(e,t)}catch(e){}if(u(e,t))return o(!r.f.call(e,t),e[t])}},function(e,t){},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=t.createMatchSelector=t.getAction=t.getLocation=t.routerMiddleware=t.connectRouter=t.ConnectedRouter=t.routerActions=t.goForward=t.goBack=t.go=t.replace=t.push=t.CALL_HISTORY_METHOD=t.LOCATION_CHANGE=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=a(n(273)),i=a(n(284));function a(e){return e&&e.__esModule?e:{default:e}}var u=j("createAll")(j("plainStructure")),s=u.LOCATION_CHANGE,c=u.CALL_HISTORY_METHOD,l=u.push,f=u.replace,d=u.go,p=u.goBack,h=u.goForward,m=u.routerActions,v=u.ConnectedRouter,y=u.connectRouter,g=u.routerMiddleware,_=u.getLocation,b=u.getAction,w=u.createMatchSelector;function x(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.LOCATION_CHANGE=s,t.CALL_HISTORY_METHOD=c,t.push=l,t.replace=f,t.go=d,t.goBack=p,t.goForward=h,t.routerActions=m,t.ConnectedRouter=v,t.connectRouter=y,t.routerMiddleware=g,t.getLocation=_,t.getAction=b,t.createMatchSelector=w;var E=null;function O(){if(null===E){var e=x();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),E=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return E}function S(){var e=x();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function T(){var e=O(),t=S(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=x();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var k="__INTENTIONAL_UNDEFINED__",R={};function j(e){var t=T();if(void 0===t[e])return function(e){switch(e){case"createAll":return o.default;case"plainStructure":return i.default}return}(e);var n=t[e];return n===k?void 0:n}function P(e,t){var n=T();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?k:t,function(){C(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function C(e){var t=T();delete t[e],0==Object.keys(t).length&&delete S()[O]}function M(e){var t=T(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(R,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",j),e("__GetDependency__",j),e("__Rewire__",P),e("__set__",P),e("__reset__",C),e("__ResetDependency__",C),e("__with__",M)}(),t.__get__=j,t.__GetDependency__=j,t.__Rewire__=P,t.__set__=P,t.__ResetDependency__=C,t.__RewireAPI__=R,t.default=R}).call(t,n(3))},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(1),a=n.n(i),u=n(12),s=n.n(u),c=n(27),l=n(63);function f(e,t){if(!e)throw new ReferenceError("this 
hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.history=Object(c.createMemoryHistory)(r.props),f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){o()(!this.props.history,"<MemoryRouter> ignores the history prop. To use a custom history, use `import { Router }` instead of `import { MemoryRouter as Router }`.")},t.prototype.render=function(){return a.a.createElement(l.a,{history:this.history,children:this.props.children})},t}(a.a.Component);d.propTypes={initialEntries:s.a.array,initialIndex:s.a.number,getUserConfirmation:s.a.func,keyLength:s.a.number,children:s.a.node},t.a=d},function(e,t,n){"use strict";n.d(t,"b",function(){return r}),n.d(t,"a",function(){return o}),n.d(t,"e",function(){return i}),n.d(t,"c",function(){return a}),n.d(t,"g",function(){return u}),n.d(t,"h",function(){return s}),n.d(t,"f",function(){return c}),n.d(t,"d",function(){return l});var r=!("undefined"==typeof window||!window.document||!window.document.createElement),o=function(e,t,n){return e.addEventListener?e.addEventListener(t,n,!1):e.attachEvent("on"+t,n)},i=function(e,t,n){return e.removeEventListener?e.removeEventListener(t,n,!1):e.detachEvent("on"+t,n)},a=function(e,t){return t(window.confirm(e))},u=function(){var e=window.navigator.userAgent;return(-1===e.indexOf("Android 2.")&&-1===e.indexOf("Android 4.0")||-1===e.indexOf("Mobile Safari")||-1!==e.indexOf("Chrome")||-1!==e.indexOf("Windows Phone"))&&(window.history&&"pushState"in window.history)},s=function(){return-1===window.navigator.userAgent.indexOf("Trident")},c=function(){return-1===window.navigator.userAgent.indexOf("Firefox")},l=function(e){return void 0===e.state&&-1===navigator.userAgent.indexOf("CriOS")}},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(19),s=n.n(u);var c=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.enable=function(e){this.unblock&&this.unblock(),this.unblock=this.context.router.history.block(e)},t.prototype.disable=function(){this.unblock&&(this.unblock(),this.unblock=null)},t.prototype.componentWillMount=function(){s()(this.context.router,"You should not use <Prompt> outside a 
<Router>"),this.props.when&&this.enable(this.props.message)},t.prototype.componentWillReceiveProps=function(e){e.when?this.props.when&&this.props.message===e.message||this.enable(e.message):this.disable()},t.prototype.componentWillUnmount=function(){this.disable()},t.prototype.render=function(){return null},t}(o.a.Component);c.propTypes={when:a.a.bool,message:a.a.oneOfType([a.a.func,a.a.string]).isRequired},c.defaultProps={when:!0},c.contextTypes={router:a.a.shape({history:a.a.shape({block:a.a.func.isRequired}).isRequired}).isRequired},t.a=c},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(38),s=n.n(u),c=n(19),l=n.n(c),f=n(27),d=n(88),p=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var h=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.isStatic=function(){return this.context.router&&this.context.router.staticContext},t.prototype.componentWillMount=function(){l()(this.context.router,"You should not use <Redirect> outside a <Router>"),this.isStatic()&&this.perform()},t.prototype.componentDidMount=function(){this.isStatic()||this.perform()},t.prototype.componentDidUpdate=function(e){var t=Object(f.createLocation)(e.to),n=Object(f.createLocation)(this.props.to);Object(f.locationsAreEqual)(t,n)?s()(!1,"You tried to redirect to the same route you're currently on: \""+n.pathname+n.search+'"'):this.perform()},t.prototype.computeTo=function(e){var t=e.computedMatch,n=e.to;return t?"string"==typeof n?Object(d.a)(n,t.params):p({},n,{pathname:Object(d.a)(n.pathname,t.params)}):n},t.prototype.perform=function(){var e=this.context.router.history,t=this.props.push,n=this.computeTo(this.props);t?e.push(n):e.replace(n)},t.prototype.render=function(){return null},t}(o.a.Component);h.propTypes={computedMatch:a.a.object,push:a.a.bool,from:a.a.string,to:a.a.oneOfType([a.a.string,a.a.object]).isRequired},h.defaultProps={push:!1},h.contextTypes={router:a.a.shape({history:a.a.shape({push:a.a.func.isRequired,replace:a.a.func.isRequired}).isRequired,staticContext:a.a.object}).isRequired},t.a=h},function(e,t,n){var r=n(280);e.exports=h,e.exports.parse=i,e.exports.compile=function(e,t){return s(i(e,t))},e.exports.tokensToFunction=s,e.exports.tokensToRegExp=p;var o=new RegExp(["(\\\\.)","([\\/.])?(?:(?:\\:(\\w+)(?:\\(((?:\\\\.|[^\\\\()])+)\\))?|\\(((?:\\\\.|[^\\\\()])+)\\))([+*?])?|(\\*))"].join("|"),"g");function i(e,t){for(var n,r=[],i=0,a=0,u="",s=t&&t.delimiter||"/";null!=(n=o.exec(e));){var f=n[0],d=n[1],p=n.index;if(u+=e.slice(a,p),a=p+f.length,d)u+=d[1];else{var h=e[a],m=n[2],v=n[3],y=n[4],g=n[5],_=n[6],b=n[7];u&&(r.push(u),u="");var w=null!=m&&null!=h&&h!==m,x="+"===_||"*"===_,E="?"===_||"*"===_,O=n[2]||s,S=y||g;r.push({name:v||i++,prefix:m||"",delimiter:O,optional:E,repeat:x,partial:w,asterisk:!!b,pattern:S?l(S):b?".*":"[^"+c(O)+"]+?"})}}return 
a<e.length&&(u+=e.substr(a)),u&&r.push(u),r}function a(e){return encodeURI(e).replace(/[\/?#]/g,function(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()})}function u(e){return encodeURI(e).replace(/[?#]/g,function(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()})}function s(e){for(var t=new Array(e.length),n=0;n<e.length;n++)"object"==typeof e[n]&&(t[n]=new RegExp("^(?:"+e[n].pattern+")$"));return function(n,o){for(var i="",s=n||{},c=(o||{}).pretty?a:encodeURIComponent,l=0;l<e.length;l++){var f=e[l];if("string"!=typeof f){var d,p=s[f.name];if(null==p){if(f.optional){f.partial&&(i+=f.prefix);continue}throw new TypeError('Expected "'+f.name+'" to be defined')}if(r(p)){if(!f.repeat)throw new TypeError('Expected "'+f.name+'" to not repeat, but received `'+JSON.stringify(p)+"`");if(0===p.length){if(f.optional)continue;throw new TypeError('Expected "'+f.name+'" to not be empty')}for(var h=0;h<p.length;h++){if(d=c(p[h]),!t[l].test(d))throw new TypeError('Expected all "'+f.name+'" to match "'+f.pattern+'", but received `'+JSON.stringify(d)+"`");i+=(0===h?f.prefix:f.delimiter)+d}}else{if(d=f.asterisk?u(p):c(p),!t[l].test(d))throw new TypeError('Expected "'+f.name+'" to match "'+f.pattern+'", but received "'+d+'"');i+=f.prefix+d}}else i+=f}return i}}function c(e){return e.replace(/([.+*?=^!:${}()[\]|\/\\])/g,"\\$1")}function l(e){return e.replace(/([=!:$\/()])/g,"\\$1")}function f(e,t){return e.keys=t,e}function d(e){return e.sensitive?"":"i"}function p(e,t,n){r(t)||(n=t||n,t=[]);for(var o=(n=n||{}).strict,i=!1!==n.end,a="",u=0;u<e.length;u++){var s=e[u];if("string"==typeof s)a+=c(s);else{var l=c(s.prefix),p="(?:"+s.pattern+")";t.push(s),s.repeat&&(p+="(?:"+l+p+")*"),a+=p=s.optional?s.partial?l+"("+p+")?":"(?:"+l+"("+p+"))?":l+"("+p+")"}}var h=c(n.delimiter||"/"),m=a.slice(-h.length)===h;return o||(a=(m?a.slice(0,-h.length):a)+"(?:"+h+"(?=$))?"),a+=i?"$":o&&m?"":"(?="+h+"|$)",f(new RegExp("^"+a,d(n)),t)}function h(e,t,n){return r(t)||(n=t||n,t=[]),n=n||{},e instanceof RegExp?function(e,t){var n=e.source.match(/\((?!\?)/g);if(n)for(var r=0;r<n.length;r++)t.push({name:r,prefix:null,delimiter:null,optional:!1,repeat:!1,partial:!1,asterisk:!1,pattern:null});return f(e,t)}(e,t):r(e)?function(e,t,n){for(var r=[],o=0;o<e.length;o++)r.push(h(e[o],t,n).source);return f(new RegExp("(?:"+r.join("|")+")",d(n)),t)}(e,t,n):function(e,t,n){return p(i(e,n),t,n)}(e,t,n)}},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=n.n(u),c=n(12),l=n.n(c),f=n(27),d=n(63),p=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function h(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var m=function(e){return"/"===e.charAt(0)?e:"/"+e},v=function(e,t){return e?p({},t,{pathname:m(e)+t.pathname}):t},y=function(e){return"string"==typeof e?e:Object(f.createPath)(e)},g=function(e){return function(){a()(!1,"You cannot %s with <StaticRouter>",e)}},_=function(){},b=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=h(this,e.call.apply(e,[this].concat(i))),r.createHref=function(e){return m(r.props.basename+y(e))},r.handlePush=function(e){var 
t=r.props,n=t.basename,o=t.context;o.action="PUSH",o.location=v(n,Object(f.createLocation)(e)),o.url=y(o.location)},r.handleReplace=function(e){var t=r.props,n=t.basename,o=t.context;o.action="REPLACE",o.location=v(n,Object(f.createLocation)(e)),o.url=y(o.location)},r.handleListen=function(){return _},r.handleBlock=function(){return _},h(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.getChildContext=function(){return{router:{staticContext:this.props.context}}},t.prototype.componentWillMount=function(){o()(!this.props.history,"<StaticRouter> ignores the history prop. To use a custom history, use `import { Router }` instead of `import { StaticRouter as Router }`.")},t.prototype.render=function(){var e=this.props,t=e.basename,n=(e.context,e.location),r=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["basename","context","location"]),o={createHref:this.createHref,action:"POP",location:function(e,t){if(!e)return t;var n=m(e);return 0!==t.pathname.indexOf(n)?t:p({},t,{pathname:t.pathname.substr(n.length)})}(t,Object(f.createLocation)(n)),push:this.handlePush,replace:this.handleReplace,go:g("go"),goBack:g("goBack"),goForward:g("goForward"),listen:this.handleListen,block:this.handleBlock};return s.a.createElement(d.a,p({},r,{history:o}))},t}(s.a.Component);b.propTypes={basename:l.a.string,context:l.a.object.isRequired,location:l.a.oneOfType([l.a.string,l.a.object])},b.defaultProps={basename:"",location:"/"},b.childContextTypes={router:l.a.object.isRequired},t.a=b},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(38),s=n.n(u),c=n(19),l=n.n(c),f=n(64);var d=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){l()(this.context.router,"You should not use <Switch> outside a <Router>")},t.prototype.componentWillReceiveProps=function(e){s()(!(e.location&&!this.props.location),'<Switch> elements should not change from uncontrolled to controlled (or vice versa). You initially used no "location" prop and then provided one on a subsequent render.'),s()(!(!e.location&&this.props.location),'<Switch> elements should not change from controlled to uncontrolled (or vice versa). 
You provided a "location" prop initially but omitted it on a subsequent render.')},t.prototype.render=function(){var e=this.context.router.route,t=this.props.children,n=this.props.location||e.location,r=void 0,i=void 0;return o.a.Children.forEach(t,function(t){if(null==r&&o.a.isValidElement(t)){var a=t.props,u=a.path,s=a.exact,c=a.strict,l=a.sensitive,d=a.from,p=u||d;i=t,r=Object(f.a)(n.pathname,{path:p,exact:s,strict:c,sensitive:l},e.match)}}),r?o.a.cloneElement(i,{location:n,computedMatch:r}):null},t}(o.a.Component);d.contextTypes={router:a.a.shape({route:a.a.object.isRequired}).isRequired},d.propTypes={children:a.a.node,location:a.a.object},t.a=d},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(118),s=n.n(u),c=n(89),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};t.a=function(e){var t=function(t){var n=t.wrappedComponentRef,r=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(t,["wrappedComponentRef"]);return o.a.createElement(c.a,{children:function(t){return o.a.createElement(e,l({},r,t,{ref:n}))}})};return t.displayName="withRouter("+(e.displayName||e.name)+")",t.WrappedComponent=e,t.propTypes={wrappedComponentRef:a.a.func},s()(t,e)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n.d(t,"createStore",function(){return s}),n.d(t,"combineReducers",function(){return l}),n.d(t,"bindActionCreators",function(){return d}),n.d(t,"applyMiddleware",function(){return h}),n.d(t,"compose",function(){return p}),n.d(t,"__DO_NOT_USE__ActionTypes",function(){return o});var r=n(290),o={INIT:"@@redux/INIT"+Math.random().toString(36).substring(7).split("").join("."),REPLACE:"@@redux/REPLACE"+Math.random().toString(36).substring(7).split("").join(".")},i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},a=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function u(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)return!1;for(var t=e;null!==Object.getPrototypeOf(t);)t=Object.getPrototypeOf(t);return Object.getPrototypeOf(e)===t}function s(e,t,n){var a;if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 0!==n){if("function"!=typeof n)throw new Error("Expected the enhancer to be a function.");return n(s)(e,t)}if("function"!=typeof e)throw new Error("Expected the reducer to be a function.");var c=e,l=t,f=[],d=f,p=!1;function h(){d===f&&(d=f.slice())}function m(){if(p)throw new Error("You may not call store.getState() while the reducer is executing. The reducer has already received the state as an argument. Pass it down from the top reducer instead of reading it from the store.");return l}function v(e){if("function"!=typeof e)throw new Error("Expected the listener to be a function.");if(p)throw new Error("You may not call store.subscribe() while the reducer is executing. If you would like to be notified after the store has been updated, subscribe from a component and invoke store.getState() in the callback to access the latest state. 
See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");var t=!0;return h(),d.push(e),function(){if(t){if(p)throw new Error("You may not unsubscribe from a store listener while the reducer is executing. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");t=!1,h();var n=d.indexOf(e);d.splice(n,1)}}}function y(e){if(!u(e))throw new Error("Actions must be plain objects. Use custom middleware for async actions.");if(void 0===e.type)throw new Error('Actions may not have an undefined "type" property. Have you misspelled a constant?');if(p)throw new Error("Reducers may not dispatch actions.");try{p=!0,l=c(l,e)}finally{p=!1}for(var t=f=d,n=0;n<t.length;n++){(0,t[n])()}return e}return y({type:o.INIT}),(a={dispatch:y,subscribe:v,getState:m,replaceReducer:function(e){if("function"!=typeof e)throw new Error("Expected the nextReducer to be a function.");c=e,y({type:o.REPLACE})}})[r.a]=function(){var e,t=v;return(e={subscribe:function(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new TypeError("Expected the observer to be an object.");function n(){e.next&&e.next(m())}return n(),{unsubscribe:t(n)}}})[r.a]=function(){return this},e},a}function c(e,t){var n=t&&t.type;return"Given "+(n&&'action "'+String(n)+'"'||"an action")+', reducer "'+e+'" returned undefined. To ignore an action, you must explicitly return the previous state. If you want this reducer to hold no value, you can return null instead of undefined.'}function l(e){for(var t=Object.keys(e),n={},r=0;r<t.length;r++){var i=t[r];0,"function"==typeof e[i]&&(n[i]=e[i])}var a=Object.keys(n);var u=void 0;try{!function(e){Object.keys(e).forEach(function(t){var n=e[t];if(void 0===n(void 0,{type:o.INIT}))throw new Error('Reducer "'+t+"\" returned undefined during initialization. If the state passed to the reducer is undefined, you must explicitly return the initial state. The initial state may not be undefined. If you don't want to set a value for this reducer, you can use null instead of undefined.");if(void 0===n(void 0,{type:"@@redux/PROBE_UNKNOWN_ACTION_"+Math.random().toString(36).substring(7).split("").join(".")}))throw new Error('Reducer "'+t+"\" returned undefined when probed with a random type. Don't try to handle "+o.INIT+' or other actions in "redux/*" namespace. They are considered private. Instead, you must return the current state for any unknown actions, unless it is undefined, in which case you must return the initial state, regardless of the action type. The initial state may not be undefined, but can be null.')})}(n)}catch(e){u=e}return function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1];if(u)throw u;for(var r=!1,o={},i=0;i<a.length;i++){var s=a[i],l=n[s],f=e[s],d=l(f,t);if(void 0===d){var p=c(s,t);throw new Error(p)}o[s]=d,r=r||d!==f}return r?o:e}}function f(e,t){return function(){return t(e.apply(this,arguments))}}function d(e,t){if("function"==typeof e)return f(e,t);if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new Error("bindActionCreators expected an object or a function, instead received "+(null===e?"null":void 0===e?"undefined":i(e))+'. 
Did you write "import ActionCreators from" instead of "import * as ActionCreators from"?');for(var n=Object.keys(e),r={},o=0;o<n.length;o++){var a=n[o],u=e[a];"function"==typeof u&&(r[a]=f(u,t))}return r}function p(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return 0===t.length?function(e){return e}:1===t.length?t[0]:t.reduce(function(e,t){return function(){return e(t.apply(void 0,arguments))}})}function h(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return function(e){return function(){for(var n=arguments.length,r=Array(n),o=0;o<n;o++)r[o]=arguments[o];var i=e.apply(void 0,r),u=function(){throw new Error("Dispatching while constructing your middleware is not allowed. Other middleware would not be applied to this dispatch.")},s={getState:i.getState,dispatch:function(){return u.apply(void 0,arguments)}},c=t.map(function(e){return e(s)});return u=p.apply(void 0,c)(i.dispatch),a({},i,{dispatch:u})}}}},function(e,t,n){e.exports=n(294)()},function(e,t,n){"use strict";n.d(t,"b",function(){return i}),n.d(t,"a",function(){return a});var r=n(145),o=n.n(r),i=o.a.shape({trySubscribe:o.a.func.isRequired,tryUnsubscribe:o.a.func.isRequired,notifyNestedSubs:o.a.func.isRequired,isSubscribed:o.a.func.isRequired}),a=o.a.shape({subscribe:o.a.func.isRequired,dispatch:o.a.func.isRequired,getState:o.a.func.isRequired})},function(e,t,n){"use strict";t.a=function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=r.getDisplayName,h=void 0===i?function(e){return"ConnectAdvanced("+e+")"}:i,m=r.methodName,v=void 0===m?"connectAdvanced":m,y=r.renderCountProp,g=void 0===y?void 0:y,_=r.shouldHandleStateChanges,b=void 0===_||_,w=r.storeKey,x=void 0===w?"store":w,E=r.withRef,O=void 0!==E&&E,S=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(r,["getDisplayName","methodName","renderCountProp","shouldHandleStateChanges","storeKey","withRef"]),T=x+"Subscription",k=f++,R=((t={})[x]=c.a,t[T]=c.b,t),j=((n={})[T]=c.b,n);return function(t){a()("function"==typeof t,"You must pass a component to the function returned by "+v+". Instead received "+JSON.stringify(t));var n=t.displayName||t.name||"Component",r=h(n),i=l({},S,{getDisplayName:h,methodName:v,renderCountProp:g,shouldHandleStateChanges:b,storeKey:x,withRef:O,displayName:r,wrappedComponentName:n,WrappedComponent:t}),c=function(n){function o(e,t){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,n.call(this,e,t));return i.version=k,i.state={},i.renderCount=0,i.store=e[x]||t[x],i.propsMode=Boolean(e[x]),i.setWrappedInstance=i.setWrappedInstance.bind(i),a()(i.store,'Could not find "'+x+'" in either the context or props of "'+r+'". 
Either wrap the root component in a <Provider>, or explicitly pass "'+x+'" as a prop to "'+r+'".'),i.initSelector(),i.initSubscription(),i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,n),o.prototype.getChildContext=function(){var e,t=this.propsMode?null:this.subscription;return(e={})[T]=t||this.context[T],e},o.prototype.componentDidMount=function(){b&&(this.subscription.trySubscribe(),this.selector.run(this.props),this.selector.shouldComponentUpdate&&this.forceUpdate())},o.prototype.componentWillReceiveProps=function(e){this.selector.run(e)},o.prototype.shouldComponentUpdate=function(){return this.selector.shouldComponentUpdate},o.prototype.componentWillUnmount=function(){this.subscription&&this.subscription.tryUnsubscribe(),this.subscription=null,this.notifyNestedSubs=p,this.store=null,this.selector.run=p,this.selector.shouldComponentUpdate=!1},o.prototype.getWrappedInstance=function(){return a()(O,"To access the wrapped instance, you need to specify { withRef: true } in the options argument of the "+v+"() call."),this.wrappedInstance},o.prototype.setWrappedInstance=function(e){this.wrappedInstance=e},o.prototype.initSelector=function(){var t=e(this.store.dispatch,i);this.selector=function(e,t){var n={run:function(r){try{var o=e(t.getState(),r);(o!==n.props||n.error)&&(n.shouldComponentUpdate=!0,n.props=o,n.error=null)}catch(e){n.shouldComponentUpdate=!0,n.error=e}}};return n}(t,this.store),this.selector.run(this.props)},o.prototype.initSubscription=function(){if(b){var e=(this.propsMode?this.props:this.context)[T];this.subscription=new s.a(this.store,e,this.onStateChange.bind(this)),this.notifyNestedSubs=this.subscription.notifyNestedSubs.bind(this.subscription)}},o.prototype.onStateChange=function(){this.selector.run(this.props),this.selector.shouldComponentUpdate?(this.componentDidUpdate=this.notifyNestedSubsOnComponentDidUpdate,this.setState(d)):this.notifyNestedSubs()},o.prototype.notifyNestedSubsOnComponentDidUpdate=function(){this.componentDidUpdate=void 0,this.notifyNestedSubs()},o.prototype.isSubscribed=function(){return Boolean(this.subscription)&&this.subscription.isSubscribed()},o.prototype.addExtraProps=function(e){if(!(O||g||this.propsMode&&this.subscription))return e;var t=l({},e);return O&&(t.ref=this.setWrappedInstance),g&&(t[g]=this.renderCount++),this.propsMode&&this.subscription&&(t[T]=this.subscription),t},o.prototype.render=function(){var e=this.selector;if(e.shouldComponentUpdate=!1,e.error)throw e.error;return Object(u.createElement)(t,this.addExtraProps(e.props))},o}(u.Component);return c.WrappedComponent=t,c.displayName=r,c.childContextTypes=j,c.contextTypes=R,c.propTypes=R,o()(c,t)}};var r=n(296),o=n.n(r),i=n(297),a=n.n(i),u=n(90),s=(n.n(u),n(298)),c=n(146),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var f=0,d={};function p(){}},function(e,t,n){"use strict";t.a=function(e){return function(t,n){var r=e(t,n);function o(){return r}return o.dependsOnOwnProps=!1,o}},t.b=function(e,t){return function(t,n){n.displayName;var o=function(e,t){return o.dependsOnOwnProps?o.mapToProps(e,t):o.mapToProps(e)};return 
o.dependsOnOwnProps=!0,o.mapToProps=function(t,n){o.mapToProps=e,o.dependsOnOwnProps=r(e);var i=o(t,n);return"function"==typeof i&&(o.mapToProps=i,o.dependsOnOwnProps=r(i),i=o(t,n)),i},o}};n(149);function r(e){return null!==e.dependsOnOwnProps&&void 0!==e.dependsOnOwnProps?Boolean(e.dependsOnOwnProps):1!==e.length}},function(e,t,n){"use strict";n(302),n(91)},function(e,t,n){"use strict";var r=n(304).a.Symbol;t.a=r},function(e,t,n){"use strict";var r=function(){};e.exports=r},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(19),s=n.n(u),c=n(27),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){return!!(e.metaKey||e.altKey||e.ctrlKey||e.shiftKey)},p=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.handleClick=function(e){if(r.props.onClick&&r.props.onClick(e),!e.defaultPrevented&&0===e.button&&!r.props.target&&!d(e)){e.preventDefault();var t=r.context.router.history,n=r.props,o=n.replace,i=n.to;o?t.replace(i):t.push(i)}},f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.render=function(){var e=this.props,t=(e.replace,e.to),n=e.innerRef,r=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["replace","to","innerRef"]);s()(this.context.router,"You should not use <Link> outside a <Router>"),s()(void 0!==t,'You must specify the "to" property');var i=this.context.router.history,a="string"==typeof t?Object(c.createLocation)(t,null,null,i.location):t,u=i.createHref(a);return o.a.createElement("a",l({},r,{onClick:this.handleClick,href:u,ref:n}))},t}(o.a.Component);p.propTypes={onClick:a.a.func,target:a.a.string,replace:a.a.bool,to:a.a.oneOfType([a.a.string,a.a.object]).isRequired,innerRef:a.a.oneOfType([a.a.string,a.a.func])},p.defaultProps={replace:!1},p.contextTypes={router:a.a.shape({history:a.a.shape({push:a.a.func.isRequired,replace:a.a.func.isRequired,createHref:a.a.func.isRequired}).isRequired}).isRequired},t.a=p},function(e,t,n){"use strict";var r=n(89);t.a=r.a},function(e,t,n){e.exports={default:n(349),__esModule:!0}},function(e,t,n){var r=n(20);e.exports=function(e,t,n,o){try{return o?t(r(n)[0],n[1]):t(n)}catch(t){var i=e.return;throw void 0!==i&&r(i.call(e)),t}}},function(e,t,n){var r=n(36),o=n(14)("iterator"),i=Array.prototype;e.exports=function(e){return void 0!==e&&(r.Array===e||i[o]===e)}},function(e,t,n){var r=n(14)("iterator"),o=!1;try{var i=[7][r]();i.return=function(){o=!0},Array.from(i,function(){throw 2})}catch(e){}e.exports=function(e,t){if(!t&&!o)return!1;var n=!1;try{var i=[7],a=i[r]();a.next=function(){return{done:n=!0}},i[r]=function(){return a},e(i)}catch(e){}return n}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(e){return 
e&&e.__esModule?e:{default:e}}(n(352)).default.namespace("vcat.search");t.default=r},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.setSaved=t.getSaved=t.getCountFromStore=t.getSavedFromStore=t.getSavedCount=t.getSavedUrls=void 0;var r=s(n(93)),o=s(n(28)),i=s(n(158)),a=n(94),u=n(17);function s(e){return e&&e.__esModule?e:{default:e}}t.getSavedUrls=function(){var e=c();return(0,o.default)(e).sort().map(function(t){var n=e[t],r=n.verified,i=n.hash,a=n.frames;return(0,o.default)(a).map(function(e){return a[e]&&(0,u.imageUrl)(r,i,e)}).filter(function(e){return!!e})}).reduce(function(e,t){return t&&t.length?e.concat(t):e},[])},t.getSavedCount=function(e){return e=e||c(),(0,o.default)(e).sort().map(function(t){var n=e[t].frames;return(0,o.default)(n).filter(function(e){return n[e]}).filter(function(e){return!!e}).length}).reduce(function(e,t){return e+t},0)};var c=t.getSavedFromStore=function(){return a.store.getState().review.saved};t.getCountFromStore=function(){return a.store.getState().review.count||0},t.getSaved=function(){try{return JSON.parse((0,i.default)("saved"))||{}}catch(e){return console.log("error getting saved!",e),{}}},t.setSaved=function(e){try{(0,i.default)("saved",(0,r.default)(e))}catch(e){console.log("error setting saved!",e)}}},function(e,t,n){"use strict";t.__esModule=!0;var r=i(n(359)),o=i(n(362));function i(e){return e&&e.__esModule?e:{default:e}}t.default=function(){return function(e,t){if(Array.isArray(e))return e;if((0,r.default)(Object(e)))return function(e,t){var n=[],r=!0,i=!1,a=void 0;try{for(var u,s=(0,o.default)(e);!(r=(u=s.next()).done)&&(n.push(u.value),!t||n.length!==t);r=!0);}catch(e){i=!0,a=e}finally{try{!r&&s.return&&s.return()}finally{if(i)throw a}}return n}(e,t);throw new TypeError("Invalid attempt to destructure non-iterable instance")}}()},function(e,t,n){var r=n(20),o=n(56),i=n(14)("species");e.exports=function(e,t){var n,a=r(e).constructor;return void 0===a||void 0==(n=r(a)[i])?t:o(n)}},function(e,t,n){var r,o,i,a=n(34),u=n(370),s=n(131),c=n(76),l=n(13),f=l.process,d=l.setImmediate,p=l.clearImmediate,h=l.MessageChannel,m=l.Dispatch,v=0,y={},g=function(){var e=+this;if(y.hasOwnProperty(e)){var t=y[e];delete y[e],t()}},_=function(e){g.call(e.data)};d&&p||(d=function(e){for(var t=[],n=1;arguments.length>n;)t.push(arguments[n++]);return y[++v]=function(){u("function"==typeof e?e:Function(e),t)},r(v),v},p=function(e){delete y[e]},"process"==n(46)(f)?r=function(e){f.nextTick(a(g,e,1))}:m&&m.now?r=function(e){m.now(a(g,e,1))}:h?(i=(o=new h).port2,o.port1.onmessage=_,r=a(i.postMessage,i,1)):l.addEventListener&&"function"==typeof postMessage&&!l.importScripts?(r=function(e){l.postMessage(e+"","*")},l.addEventListener("message",_,!1)):r="onreadystatechange"in c("script")?function(e){s.appendChild(c("script")).onreadystatechange=function(){s.removeChild(this),g.call(e)}}:function(e){setTimeout(a(g,e,1),0)}),e.exports={set:d,clear:p}},function(e,t){e.exports=function(e){try{return{e:!1,v:e()}}catch(e){return{e:!0,v:e}}}},function(e,t,n){var r=n(20),o=n(23),i=n(98);e.exports=function(e,t){if(r(e),o(t)&&t.constructor===e)return t;var 
n=i.f(e);return(0,n.resolve)(t),n.promise}},function(e,t,n){e.exports={addDays:n(50),addHours:n(166),addISOYears:n(167),addMilliseconds:n(51),addMinutes:n(169),addMonths:n(68),addQuarters:n(170),addSeconds:n(171),addWeeks:n(101),addYears:n(172),areRangesOverlapping:n(377),closestIndexTo:n(378),closestTo:n(379),compareAsc:n(53),compareDesc:n(102),differenceInCalendarDays:n(67),differenceInCalendarISOWeeks:n(380),differenceInCalendarISOYears:n(173),differenceInCalendarMonths:n(174),differenceInCalendarQuarters:n(381),differenceInCalendarWeeks:n(382),differenceInCalendarYears:n(176),differenceInDays:n(177),differenceInHours:n(383),differenceInISOYears:n(384),differenceInMilliseconds:n(69),differenceInMinutes:n(385),differenceInMonths:n(103),differenceInQuarters:n(386),differenceInSeconds:n(104),differenceInWeeks:n(387),differenceInYears:n(388),distanceInWords:n(179),distanceInWordsStrict:n(392),distanceInWordsToNow:n(393),eachDay:n(394),endOfDay:n(106),endOfHour:n(395),endOfISOWeek:n(396),endOfISOYear:n(397),endOfMinute:n(398),endOfMonth:n(181),endOfQuarter:n(399),endOfSecond:n(400),endOfToday:n(401),endOfTomorrow:n(402),endOfWeek:n(180),endOfYear:n(403),endOfYesterday:n(404),format:n(405),getDate:n(406),getDay:n(407),getDayOfYear:n(182),getDaysInMonth:n(100),getDaysInYear:n(408),getHours:n(409),getISODay:n(186),getISOWeek:n(107),getISOWeeksInYear:n(410),getISOYear:n(29),getMilliseconds:n(411),getMinutes:n(412),getMonth:n(413),getOverlappingDaysInRanges:n(414),getQuarter:n(175),getSeconds:n(415),getTime:n(416),getYear:n(417),isAfter:n(418),isBefore:n(419),isDate:n(99),isEqual:n(420),isFirstDayOfMonth:n(421),isFriday:n(422),isFuture:n(423),isLastDayOfMonth:n(424),isLeapYear:n(185),isMonday:n(425),isPast:n(426),isSameDay:n(427),isSameHour:n(187),isSameISOWeek:n(189),isSameISOYear:n(190),isSameMinute:n(191),isSameMonth:n(193),isSameQuarter:n(194),isSameSecond:n(196),isSameWeek:n(108),isSameYear:n(198),isSaturday:n(428),isSunday:n(429),isThisHour:n(430),isThisISOWeek:n(431),isThisISOYear:n(432),isThisMinute:n(433),isThisMonth:n(434),isThisQuarter:n(435),isThisSecond:n(436),isThisWeek:n(437),isThisYear:n(438),isThursday:n(439),isToday:n(440),isTomorrow:n(441),isTuesday:n(442),isValid:n(184),isWednesday:n(443),isWeekend:n(444),isWithinRange:n(445),isYesterday:n(446),lastDayOfISOWeek:n(447),lastDayOfISOYear:n(448),lastDayOfMonth:n(449),lastDayOfQuarter:n(450),lastDayOfWeek:n(199),lastDayOfYear:n(451),max:n(452),min:n(453),parse:n(0),setDate:n(454),setDay:n(455),setDayOfYear:n(456),setHours:n(457),setISODay:n(458),setISOWeek:n(459),setISOYear:n(168),setMilliseconds:n(460),setMinutes:n(461),setMonth:n(200),setQuarter:n(462),setSeconds:n(463),setYear:n(464),startOfDay:n(31),startOfHour:n(188),startOfISOWeek:n(30),startOfISOYear:n(52),startOfMinute:n(192),startOfMonth:n(465),startOfQuarter:n(195),startOfSecond:n(197),startOfToday:n(466),startOfTomorrow:n(467),startOfWeek:n(66),startOfYear:n(183),startOfYesterday:n(468),subDays:n(469),subHours:n(470),subISOYears:n(178),subMilliseconds:n(471),subMinutes:n(472),subMonths:n(473),subQuarters:n(474),subSeconds:n(475),subWeeks:n(476),subYears:n(477)}},function(e,t,n){var r=n(51),o=36e5;e.exports=function(e,t){var n=Number(t);return r(e,n*o)}},function(e,t,n){var r=n(29),o=n(168);e.exports=function(e,t){var n=Number(t);return o(e,r(e)+n)}},function(e,t,n){var r=n(0),o=n(52),i=n(67);e.exports=function(e,t){var n=r(e),a=Number(t),u=i(n,o(n)),s=new Date(0);return s.setFullYear(a,0,4),s.setHours(0,0,0,0),(n=o(s)).setDate(n.getDate()+u),n}},function(e,t,n){var 
r=n(51),o=6e4;e.exports=function(e,t){var n=Number(t);return r(e,n*o)}},function(e,t,n){var r=n(68);e.exports=function(e,t){var n=Number(t);return r(e,3*n)}},function(e,t,n){var r=n(51);e.exports=function(e,t){var n=Number(t);return r(e,1e3*n)}},function(e,t,n){var r=n(68);e.exports=function(e,t){var n=Number(t);return r(e,12*n)}},function(e,t,n){var r=n(29);e.exports=function(e,t){return r(e)-r(t)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return 12*(n.getFullYear()-o.getFullYear())+(n.getMonth()-o.getMonth())}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return Math.floor(t.getMonth()/3)+1}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getFullYear()-o.getFullYear()}},function(e,t,n){var r=n(0),o=n(67),i=n(53);e.exports=function(e,t){var n=r(e),a=r(t),u=i(n,a),s=Math.abs(o(n,a));return n.setDate(n.getDate()-u*s),u*(s-(i(n,a)===-u))}},function(e,t,n){var r=n(167);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(102),o=n(0),i=n(104),a=n(103),u=n(105),s=1440,c=2520,l=43200,f=86400;e.exports=function(e,t,n){var d=n||{},p=r(e,t),h=d.locale,m=u.distanceInWords.localize;h&&h.distanceInWords&&h.distanceInWords.localize&&(m=h.distanceInWords.localize);var v,y,g={addSuffix:Boolean(d.addSuffix),comparison:p};p>0?(v=o(e),y=o(t)):(v=o(t),y=o(e));var _,b=i(y,v),w=y.getTimezoneOffset()-v.getTimezoneOffset(),x=Math.round(b/60)-w;if(x<2)return d.includeSeconds?b<5?m("lessThanXSeconds",5,g):b<10?m("lessThanXSeconds",10,g):b<20?m("lessThanXSeconds",20,g):b<40?m("halfAMinute",null,g):m(b<60?"lessThanXMinutes":"xMinutes",1,g):0===x?m("lessThanXMinutes",1,g):m("xMinutes",x,g);if(x<45)return m("xMinutes",x,g);if(x<90)return m("aboutXHours",1,g);if(x<s)return m("aboutXHours",Math.round(x/60),g);if(x<c)return m("xDays",1,g);if(x<l)return m("xDays",Math.round(x/s),g);if(x<f)return m("aboutXMonths",_=Math.round(x/l),g);if((_=a(y,v))<12)return m("xMonths",Math.round(x/l),g);var E=_%12,O=Math.floor(_/12);return E<3?m("aboutXYears",O,g):E<9?m("overXYears",O,g):m("almostXYears",O+1,g)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=t&&Number(t.weekStartsOn)||0,o=r(e),i=o.getDay(),a=6+(i<n?-7:0)-(i-n);return o.setDate(o.getDate()+a),o.setHours(23,59,59,999),o}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(23,59,59,999),t}},function(e,t,n){var r=n(0),o=n(183),i=n(67);e.exports=function(e){var t=r(e);return i(t,o(t))+1}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=new Date(0);return n.setFullYear(t.getFullYear(),0,1),n.setHours(0,0,0,0),n}},function(e,t,n){var r=n(99);e.exports=function(e){if(r(e))return!isNaN(e);throw new TypeError(toString.call(e)+" is not an instance of Date")}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e).getFullYear();return t%400==0||t%4==0&&t%100!=0}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e).getDay();return 0===t&&(t=7),t}},function(e,t,n){var r=n(188);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMinutes(0,0,0),t}},function(e,t,n){var r=n(108);e.exports=function(e,t){return r(e,t,{weekStartsOn:1})}},function(e,t,n){var r=n(52);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(192);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var 
r=n(0);e.exports=function(e){var t=r(e);return t.setSeconds(0,0),t}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getFullYear()===o.getFullYear()&&n.getMonth()===o.getMonth()}},function(e,t,n){var r=n(195);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth(),o=n-n%3;return t.setMonth(o,1),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(197);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMilliseconds(0),t}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getFullYear()===o.getFullYear()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=t&&Number(t.weekStartsOn)||0,o=r(e),i=o.getDay(),a=6+(i<n?-7:0)-(i-n);return o.setHours(0,0,0,0),o.setDate(o.getDate()+a),o}},function(e,t,n){var r=n(0),o=n(100);e.exports=function(e,t){var n=r(e),i=Number(t),a=n.getFullYear(),u=n.getDate(),s=new Date(0);s.setFullYear(a,i,15),s.setHours(0,0,0,0);var c=o(s);return n.setMonth(i,Math.min(u,c)),n}},function(e,t,n){"use strict";(function(t,r){var o=n(70);e.exports=_;var i,a=n(202);_.ReadableState=g;n(109).EventEmitter;var u=function(e,t){return e.listeners(t).length},s=n(203),c=n(71).Buffer,l=t.Uint8Array||function(){};var f=n(54);f.inherits=n(32);var d=n(482),p=void 0;p=d&&d.debuglog?d.debuglog("stream"):function(){};var h,m=n(483),v=n(205);f.inherits(_,s);var y=["error","close","destroy","pause","resume"];function g(e,t){i=i||n(33),e=e||{};var r=t instanceof i;this.objectMode=!!e.objectMode,r&&(this.objectMode=this.objectMode||!!e.readableObjectMode);var o=e.highWaterMark,a=e.readableHighWaterMark,u=this.objectMode?16:16384;this.highWaterMark=o||0===o?o:r&&(a||0===a)?a:u,this.highWaterMark=Math.floor(this.highWaterMark),this.buffer=new m,this.length=0,this.pipes=null,this.pipesCount=0,this.flowing=null,this.ended=!1,this.endEmitted=!1,this.reading=!1,this.sync=!0,this.needReadable=!1,this.emittedReadable=!1,this.readableListening=!1,this.resumeScheduled=!1,this.destroyed=!1,this.defaultEncoding=e.defaultEncoding||"utf8",this.awaitDrain=0,this.readingMore=!1,this.decoder=null,this.encoding=null,e.encoding&&(h||(h=n(206).StringDecoder),this.decoder=new h(e.encoding),this.encoding=e.encoding)}function _(e){if(i=i||n(33),!(this instanceof _))return new _(e);this._readableState=new g(e,this),this.readable=!0,e&&("function"==typeof e.read&&(this._read=e.read),"function"==typeof e.destroy&&(this._destroy=e.destroy)),s.call(this)}function b(e,t,n,r,o){var i,a=e._readableState;null===t?(a.reading=!1,function(e,t){if(t.ended)return;if(t.decoder){var n=t.decoder.end();n&&n.length&&(t.buffer.push(n),t.length+=t.objectMode?1:n.length)}t.ended=!0,O(e)}(e,a)):(o||(i=function(e,t){var n;(function(e){return c.isBuffer(e)||e instanceof l})(t)||"string"==typeof t||void 0===t||e.objectMode||(n=new TypeError("Invalid non-string/buffer chunk"));return n}(a,t)),i?e.emit("error",i):a.objectMode||t&&t.length>0?("string"==typeof t||a.objectMode||Object.getPrototypeOf(t)===c.prototype||(t=function(e){return c.from(e)}(t)),r?a.endEmitted?e.emit("error",new Error("stream.unshift() after end event")):w(e,a,t,!0):a.ended?e.emit("error",new Error("stream.push() after EOF")):(a.reading=!1,a.decoder&&!n?(t=a.decoder.write(t),a.objectMode||0!==t.length?w(e,a,t,!1):T(e,a)):w(e,a,t,!1))):r||(a.reading=!1));return 
function(e){return!e.ended&&(e.needReadable||e.length<e.highWaterMark||0===e.length)}(a)}function w(e,t,n,r){t.flowing&&0===t.length&&!t.sync?(e.emit("data",n),e.read(0)):(t.length+=t.objectMode?1:n.length,r?t.buffer.unshift(n):t.buffer.push(n),t.needReadable&&O(e)),T(e,t)}Object.defineProperty(_.prototype,"destroyed",{get:function(){return void 0!==this._readableState&&this._readableState.destroyed},set:function(e){this._readableState&&(this._readableState.destroyed=e)}}),_.prototype.destroy=v.destroy,_.prototype._undestroy=v.undestroy,_.prototype._destroy=function(e,t){this.push(null),t(e)},_.prototype.push=function(e,t){var n,r=this._readableState;return r.objectMode?n=!0:"string"==typeof e&&((t=t||r.defaultEncoding)!==r.encoding&&(e=c.from(e,t),t=""),n=!0),b(this,e,t,!1,n)},_.prototype.unshift=function(e){return b(this,e,null,!0,!1)},_.prototype.isPaused=function(){return!1===this._readableState.flowing},_.prototype.setEncoding=function(e){return h||(h=n(206).StringDecoder),this._readableState.decoder=new h(e),this._readableState.encoding=e,this};var x=8388608;function E(e,t){return e<=0||0===t.length&&t.ended?0:t.objectMode?1:e!=e?t.flowing&&t.length?t.buffer.head.data.length:t.length:(e>t.highWaterMark&&(t.highWaterMark=function(e){return e>=x?e=x:(e--,e|=e>>>1,e|=e>>>2,e|=e>>>4,e|=e>>>8,e|=e>>>16,e++),e}(e)),e<=t.length?e:t.ended?t.length:(t.needReadable=!0,0))}function O(e){var t=e._readableState;t.needReadable=!1,t.emittedReadable||(p("emitReadable",t.flowing),t.emittedReadable=!0,t.sync?o.nextTick(S,e):S(e))}function S(e){p("emit readable"),e.emit("readable"),P(e)}function T(e,t){t.readingMore||(t.readingMore=!0,o.nextTick(k,e,t))}function k(e,t){for(var n=t.length;!t.reading&&!t.flowing&&!t.ended&&t.length<t.highWaterMark&&(p("maybeReadMore read 0"),e.read(0),n!==t.length);)n=t.length;t.readingMore=!1}function R(e){p("readable nexttick read 0"),e.read(0)}function j(e,t){t.reading||(p("resume read 0"),e.read(0)),t.resumeScheduled=!1,t.awaitDrain=0,e.emit("resume"),P(e),t.flowing&&!t.reading&&e.read(0)}function P(e){var t=e._readableState;for(p("flow",t.flowing);t.flowing&&null!==e.read(););}function C(e,t){return 0===t.length?null:(t.objectMode?n=t.buffer.shift():!e||e>=t.length?(n=t.decoder?t.buffer.join(""):1===t.buffer.length?t.buffer.head.data:t.buffer.concat(t.length),t.buffer.clear()):n=function(e,t,n){var r;e<t.head.data.length?(r=t.head.data.slice(0,e),t.head.data=t.head.data.slice(e)):r=e===t.head.data.length?t.shift():n?function(e,t){var n=t.head,r=1,o=n.data;e-=o.length;for(;n=n.next;){var i=n.data,a=e>i.length?i.length:e;if(a===i.length?o+=i:o+=i.slice(0,e),0===(e-=a)){a===i.length?(++r,n.next?t.head=n.next:t.head=t.tail=null):(t.head=n,n.data=i.slice(a));break}++r}return t.length-=r,o}(e,t):function(e,t){var n=c.allocUnsafe(e),r=t.head,o=1;r.data.copy(n),e-=r.data.length;for(;r=r.next;){var i=r.data,a=e>i.length?i.length:e;if(i.copy(n,n.length-e,0,a),0===(e-=a)){a===i.length?(++o,r.next?t.head=r.next:t.head=t.tail=null):(t.head=r,r.data=i.slice(a));break}++o}return t.length-=o,n}(e,t);return r}(e,t.buffer,t.decoder),n);var n}function M(e){var t=e._readableState;if(t.length>0)throw new Error('"endReadable()" called on non-empty stream');t.endEmitted||(t.ended=!0,o.nextTick(I,t,e))}function I(e,t){e.endEmitted||0!==e.length||(e.endEmitted=!0,t.readable=!1,t.emit("end"))}function A(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1}_.prototype.read=function(e){p("read",e),e=parseInt(e,10);var 
t=this._readableState,n=e;if(0!==e&&(t.emittedReadable=!1),0===e&&t.needReadable&&(t.length>=t.highWaterMark||t.ended))return p("read: emitReadable",t.length,t.ended),0===t.length&&t.ended?M(this):O(this),null;if(0===(e=E(e,t))&&t.ended)return 0===t.length&&M(this),null;var r,o=t.needReadable;return p("need readable",o),(0===t.length||t.length-e<t.highWaterMark)&&p("length less than watermark",o=!0),t.ended||t.reading?p("reading or ended",o=!1):o&&(p("do read"),t.reading=!0,t.sync=!0,0===t.length&&(t.needReadable=!0),this._read(t.highWaterMark),t.sync=!1,t.reading||(e=E(n,t))),null===(r=e>0?C(e,t):null)?(t.needReadable=!0,e=0):t.length-=e,0===t.length&&(t.ended||(t.needReadable=!0),n!==e&&t.ended&&M(this)),null!==r&&this.emit("data",r),r},_.prototype._read=function(e){this.emit("error",new Error("_read() is not implemented"))},_.prototype.pipe=function(e,t){var n=this,i=this._readableState;switch(i.pipesCount){case 0:i.pipes=e;break;case 1:i.pipes=[i.pipes,e];break;default:i.pipes.push(e)}i.pipesCount+=1,p("pipe count=%d opts=%j",i.pipesCount,t);var s=(!t||!1!==t.end)&&e!==r.stdout&&e!==r.stderr?l:_;function c(t,r){p("onunpipe"),t===n&&r&&!1===r.hasUnpiped&&(r.hasUnpiped=!0,p("cleanup"),e.removeListener("close",y),e.removeListener("finish",g),e.removeListener("drain",f),e.removeListener("error",v),e.removeListener("unpipe",c),n.removeListener("end",l),n.removeListener("end",_),n.removeListener("data",m),d=!0,!i.awaitDrain||e._writableState&&!e._writableState.needDrain||f())}function l(){p("onend"),e.end()}i.endEmitted?o.nextTick(s):n.once("end",s),e.on("unpipe",c);var f=function(e){return function(){var t=e._readableState;p("pipeOnDrain",t.awaitDrain),t.awaitDrain&&t.awaitDrain--,0===t.awaitDrain&&u(e,"data")&&(t.flowing=!0,P(e))}}(n);e.on("drain",f);var d=!1;var h=!1;function m(t){p("ondata"),h=!1,!1!==e.write(t)||h||((1===i.pipesCount&&i.pipes===e||i.pipesCount>1&&-1!==A(i.pipes,e))&&!d&&(p("false write response, pause",n._readableState.awaitDrain),n._readableState.awaitDrain++,h=!0),n.pause())}function v(t){p("onerror",t),_(),e.removeListener("error",v),0===u(e,"error")&&e.emit("error",t)}function y(){e.removeListener("finish",g),_()}function g(){p("onfinish"),e.removeListener("close",y),_()}function _(){p("unpipe"),n.unpipe(e)}return n.on("data",m),function(e,t,n){if("function"==typeof e.prependListener)return e.prependListener(t,n);e._events&&e._events[t]?a(e._events[t])?e._events[t].unshift(n):e._events[t]=[n,e._events[t]]:e.on(t,n)}(e,"error",v),e.once("close",y),e.once("finish",g),e.emit("pipe",n),i.flowing||(p("pipe resume"),n.resume()),e},_.prototype.unpipe=function(e){var t=this._readableState,n={hasUnpiped:!1};if(0===t.pipesCount)return this;if(1===t.pipesCount)return e&&e!==t.pipes?this:(e||(e=t.pipes),t.pipes=null,t.pipesCount=0,t.flowing=!1,e&&e.emit("unpipe",this,n),this);if(!e){var r=t.pipes,o=t.pipesCount;t.pipes=null,t.pipesCount=0,t.flowing=!1;for(var i=0;i<o;i++)r[i].emit("unpipe",this,n);return this}var a=A(t.pipes,e);return-1===a?this:(t.pipes.splice(a,1),t.pipesCount-=1,1===t.pipesCount&&(t.pipes=t.pipes[0]),e.emit("unpipe",this,n),this)},_.prototype.on=function(e,t){var n=s.prototype.on.call(this,e,t);if("data"===e)!1!==this._readableState.flowing&&this.resume();else if("readable"===e){var r=this._readableState;r.endEmitted||r.readableListening||(r.readableListening=r.needReadable=!0,r.emittedReadable=!1,r.reading?r.length&&O(this):o.nextTick(R,this))}return n},_.prototype.addListener=_.prototype.on,_.prototype.resume=function(){var e=this._readableState;return 
e.flowing||(p("resume"),e.flowing=!0,function(e,t){t.resumeScheduled||(t.resumeScheduled=!0,o.nextTick(j,e,t))}(this,e)),this},_.prototype.pause=function(){return p("call pause flowing=%j",this._readableState.flowing),!1!==this._readableState.flowing&&(p("pause"),this._readableState.flowing=!1,this.emit("pause")),this},_.prototype.wrap=function(e){var t=this,n=this._readableState,r=!1;for(var o in e.on("end",function(){if(p("wrapped end"),n.decoder&&!n.ended){var e=n.decoder.end();e&&e.length&&t.push(e)}t.push(null)}),e.on("data",function(o){(p("wrapped data"),n.decoder&&(o=n.decoder.write(o)),!n.objectMode||null!==o&&void 0!==o)&&((n.objectMode||o&&o.length)&&(t.push(o)||(r=!0,e.pause())))}),e)void 0===this[o]&&"function"==typeof e[o]&&(this[o]=function(t){return function(){return e[t].apply(e,arguments)}}(o));for(var i=0;i<y.length;i++)e.on(y[i],this.emit.bind(this,y[i]));return this._read=function(t){p("wrapped _read",t),r&&(r=!1,e.resume())},this},Object.defineProperty(_.prototype,"readableHighWaterMark",{enumerable:!1,get:function(){return this._readableState.highWaterMark}}),_._fromList=C}).call(t,n(3),n(40))},function(e,t){var n={}.toString;e.exports=Array.isArray||function(e){return"[object Array]"==n.call(e)}},function(e,t,n){e.exports=n(109).EventEmitter},function(e,t,n){"use strict";(function(e){ -/*! - * The buffer module from node.js, for the browser. - * - * @author Feross Aboukhadijeh <feross@feross.org> <http://feross.org> - * @license MIT - */ -var r=n(480),o=n(481),i=n(202);function a(){return s.TYPED_ARRAY_SUPPORT?2147483647:1073741823}function u(e,t){if(a()<t)throw new RangeError("Invalid typed array length");return s.TYPED_ARRAY_SUPPORT?(e=new Uint8Array(t)).__proto__=s.prototype:(null===e&&(e=new s(t)),e.length=t),e}function s(e,t,n){if(!(s.TYPED_ARRAY_SUPPORT||this instanceof s))return new s(e,t,n);if("number"==typeof e){if("string"==typeof t)throw new Error("If encoding is specified then the first argument must be a string");return f(this,e)}return c(this,e,t,n)}function c(e,t,n,r){if("number"==typeof t)throw new TypeError('"value" argument must not be a number');return"undefined"!=typeof ArrayBuffer&&t instanceof ArrayBuffer?function(e,t,n,r){if(t.byteLength,n<0||t.byteLength<n)throw new RangeError("'offset' is out of bounds");if(t.byteLength<n+(r||0))throw new RangeError("'length' is out of bounds");t=void 0===n&&void 0===r?new Uint8Array(t):void 0===r?new Uint8Array(t,n):new Uint8Array(t,n,r);s.TYPED_ARRAY_SUPPORT?(e=t).__proto__=s.prototype:e=d(e,t);return e}(e,t,n,r):"string"==typeof t?function(e,t,n){"string"==typeof n&&""!==n||(n="utf8");if(!s.isEncoding(n))throw new TypeError('"encoding" must be a valid string encoding');var r=0|h(t,n),o=(e=u(e,r)).write(t,n);o!==r&&(e=e.slice(0,o));return e}(e,t,n):function(e,t){if(s.isBuffer(t)){var n=0|p(t.length);return 0===(e=u(e,n)).length?e:(t.copy(e,0,0,n),e)}if(t){if("undefined"!=typeof ArrayBuffer&&t.buffer instanceof ArrayBuffer||"length"in t)return"number"!=typeof t.length||function(e){return e!=e}(t.length)?u(e,0):d(e,t);if("Buffer"===t.type&&i(t.data))return d(e,t.data)}throw new TypeError("First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.")}(e,t)}function l(e){if("number"!=typeof e)throw new TypeError('"size" argument must be a number');if(e<0)throw new RangeError('"size" argument must not be negative')}function f(e,t){if(l(t),e=u(e,t<0?0:0|p(t)),!s.TYPED_ARRAY_SUPPORT)for(var n=0;n<t;++n)e[n]=0;return e}function d(e,t){var n=t.length<0?0:0|p(t.length);e=u(e,n);for(var 
r=0;r<n;r+=1)e[r]=255&t[r];return e}function p(e){if(e>=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function h(e,t){if(s.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var n=e.length;if(0===n)return 0;for(var r=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":case void 0:return B(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return W(e).length;default:if(r)return B(e).length;t=(""+t).toLowerCase(),r=!0}}function m(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function v(e,t,n,r,o){if(0===e.length)return-1;if("string"==typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),n=+n,isNaN(n)&&(n=o?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(o)return-1;n=e.length-1}else if(n<0){if(!o)return-1;n=0}if("string"==typeof t&&(t=s.from(t,r)),s.isBuffer(t))return 0===t.length?-1:y(e,t,n,r,o);if("number"==typeof t)return t&=255,s.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):y(e,[t],n,r,o);throw new TypeError("val must be string, number or Buffer")}function y(e,t,n,r,o){var i,a=1,u=e.length,s=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return-1;a=2,u/=2,s/=2,n/=2}function c(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}if(o){var l=-1;for(i=n;i<u;i++)if(c(e,i)===c(t,-1===l?0:i-l)){if(-1===l&&(l=i),i-l+1===s)return l*a}else-1!==l&&(i-=i-l),l=-1}else for(n+s>u&&(n=u-s),i=n;i>=0;i--){for(var f=!0,d=0;d<s;d++)if(c(e,i+d)!==c(t,d)){f=!1;break}if(f)return i}return-1}function g(e,t,n,r){n=Number(n)||0;var o=e.length-n;r?(r=Number(r))>o&&(r=o):r=o;var i=t.length;if(i%2!=0)throw new TypeError("Invalid hex string");r>i/2&&(r=i/2);for(var a=0;a<r;++a){var u=parseInt(t.substr(2*a,2),16);if(isNaN(u))return a;e[n+a]=u}return a}function _(e,t,n,r){return Y(B(t,e.length-n),e,n,r)}function b(e,t,n,r){return Y(function(e){for(var t=[],n=0;n<e.length;++n)t.push(255&e.charCodeAt(n));return t}(t),e,n,r)}function w(e,t,n,r){return b(e,t,n,r)}function x(e,t,n,r){return Y(W(t),e,n,r)}function E(e,t,n,r){return Y(function(e,t){for(var n,r,o,i=[],a=0;a<e.length&&!((t-=2)<0);++a)n=e.charCodeAt(a),r=n>>8,o=n%256,i.push(o),i.push(r);return i}(t,e.length-n),e,n,r)}function O(e,t,n){return 0===t&&n===e.length?r.fromByteArray(e):r.fromByteArray(e.slice(t,n))}function S(e,t,n){n=Math.min(e.length,n);for(var r=[],o=t;o<n;){var i,a,u,s,c=e[o],l=null,f=c>239?4:c>223?3:c>191?2:1;if(o+f<=n)switch(f){case 1:c<128&&(l=c);break;case 2:128==(192&(i=e[o+1]))&&(s=(31&c)<<6|63&i)>127&&(l=s);break;case 3:i=e[o+1],a=e[o+2],128==(192&i)&&128==(192&a)&&(s=(15&c)<<12|(63&i)<<6|63&a)>2047&&(s<55296||s>57343)&&(l=s);break;case 4:i=e[o+1],a=e[o+2],u=e[o+3],128==(192&i)&&128==(192&a)&&128==(192&u)&&(s=(15&c)<<18|(63&i)<<12|(63&a)<<6|63&u)>65535&&s<1114112&&(l=s)}null===l?(l=65533,f=1):l>65535&&(l-=65536,r.push(l>>>10&1023|55296),l=56320|1023&l),r.push(l),o+=f}return function(e){var t=e.length;if(t<=T)return String.fromCharCode.apply(String,e);var n="",r=0;for(;r<t;)n+=String.fromCharCode.apply(String,e.slice(r,r+=T));return n}(r)}t.Buffer=s,t.SlowBuffer=function(e){+e!=e&&(e=0);return 
s.alloc(+e)},t.INSPECT_MAX_BYTES=50,s.TYPED_ARRAY_SUPPORT=void 0!==e.TYPED_ARRAY_SUPPORT?e.TYPED_ARRAY_SUPPORT:function(){try{var e=new Uint8Array(1);return e.__proto__={__proto__:Uint8Array.prototype,foo:function(){return 42}},42===e.foo()&&"function"==typeof e.subarray&&0===e.subarray(1,1).byteLength}catch(e){return!1}}(),t.kMaxLength=a(),s.poolSize=8192,s._augment=function(e){return e.__proto__=s.prototype,e},s.from=function(e,t,n){return c(null,e,t,n)},s.TYPED_ARRAY_SUPPORT&&(s.prototype.__proto__=Uint8Array.prototype,s.__proto__=Uint8Array,"undefined"!=typeof Symbol&&Symbol.species&&s[Symbol.species]===s&&Object.defineProperty(s,Symbol.species,{value:null,configurable:!0})),s.alloc=function(e,t,n){return function(e,t,n,r){return l(t),t<=0?u(e,t):void 0!==n?"string"==typeof r?u(e,t).fill(n,r):u(e,t).fill(n):u(e,t)}(null,e,t,n)},s.allocUnsafe=function(e){return f(null,e)},s.allocUnsafeSlow=function(e){return f(null,e)},s.isBuffer=function(e){return!(null==e||!e._isBuffer)},s.compare=function(e,t){if(!s.isBuffer(e)||!s.isBuffer(t))throw new TypeError("Arguments must be Buffers");if(e===t)return 0;for(var n=e.length,r=t.length,o=0,i=Math.min(n,r);o<i;++o)if(e[o]!==t[o]){n=e[o],r=t[o];break}return n<r?-1:r<n?1:0},s.isEncoding=function(e){switch(String(e).toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"latin1":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return!0;default:return!1}},s.concat=function(e,t){if(!i(e))throw new TypeError('"list" argument must be an Array of Buffers');if(0===e.length)return s.alloc(0);var n;if(void 0===t)for(t=0,n=0;n<e.length;++n)t+=e[n].length;var r=s.allocUnsafe(t),o=0;for(n=0;n<e.length;++n){var a=e[n];if(!s.isBuffer(a))throw new TypeError('"list" argument must be an Array of Buffers');a.copy(r,o),o+=a.length}return r},s.byteLength=h,s.prototype._isBuffer=!0,s.prototype.swap16=function(){var e=this.length;if(e%2!=0)throw new RangeError("Buffer size must be a multiple of 16-bits");for(var t=0;t<e;t+=2)m(this,t,t+1);return this},s.prototype.swap32=function(){var e=this.length;if(e%4!=0)throw new RangeError("Buffer size must be a multiple of 32-bits");for(var t=0;t<e;t+=4)m(this,t,t+3),m(this,t+1,t+2);return this},s.prototype.swap64=function(){var e=this.length;if(e%8!=0)throw new RangeError("Buffer size must be a multiple of 64-bits");for(var t=0;t<e;t+=8)m(this,t,t+7),m(this,t+1,t+6),m(this,t+2,t+5),m(this,t+3,t+4);return this},s.prototype.toString=function(){var e=0|this.length;return 0===e?"":0===arguments.length?S(this,0,e):function(e,t,n){var r=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(t>>>=0))return"";for(e||(e="utf8");;)switch(e){case"hex":return j(this,t,n);case"utf8":case"utf-8":return S(this,t,n);case"ascii":return k(this,t,n);case"latin1":case"binary":return R(this,t,n);case"base64":return O(this,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return P(this,t,n);default:if(r)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),r=!0}}.apply(this,arguments)},s.prototype.equals=function(e){if(!s.isBuffer(e))throw new TypeError("Argument must be a Buffer");return this===e||0===s.compare(this,e)},s.prototype.inspect=function(){var e="",n=t.INSPECT_MAX_BYTES;return this.length>0&&(e=this.toString("hex",0,n).match(/.{2}/g).join(" "),this.length>n&&(e+=" ... 
")),"<Buffer "+e+">"},s.prototype.compare=function(e,t,n,r,o){if(!s.isBuffer(e))throw new TypeError("Argument must be a Buffer");if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===o&&(o=this.length),t<0||n>e.length||r<0||o>this.length)throw new RangeError("out of range index");if(r>=o&&t>=n)return 0;if(r>=o)return-1;if(t>=n)return 1;if(t>>>=0,n>>>=0,r>>>=0,o>>>=0,this===e)return 0;for(var i=o-r,a=n-t,u=Math.min(i,a),c=this.slice(r,o),l=e.slice(t,n),f=0;f<u;++f)if(c[f]!==l[f]){i=c[f],a=l[f];break}return i<a?-1:a<i?1:0},s.prototype.includes=function(e,t,n){return-1!==this.indexOf(e,t,n)},s.prototype.indexOf=function(e,t,n){return v(this,e,t,n,!0)},s.prototype.lastIndexOf=function(e,t,n){return v(this,e,t,n,!1)},s.prototype.write=function(e,t,n,r){if(void 0===t)r="utf8",n=this.length,t=0;else if(void 0===n&&"string"==typeof t)r=t,n=this.length,t=0;else{if(!isFinite(t))throw new Error("Buffer.write(string, encoding, offset[, length]) is no longer supported");t|=0,isFinite(n)?(n|=0,void 0===r&&(r="utf8")):(r=n,n=void 0)}var o=this.length-t;if((void 0===n||n>o)&&(n=o),e.length>0&&(n<0||t<0)||t>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var i=!1;;)switch(r){case"hex":return g(this,e,t,n);case"utf8":case"utf-8":return _(this,e,t,n);case"ascii":return b(this,e,t,n);case"latin1":case"binary":return w(this,e,t,n);case"base64":return x(this,e,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return E(this,e,t,n);default:if(i)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),i=!0}},s.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var T=4096;function k(e,t,n){var r="";n=Math.min(e.length,n);for(var o=t;o<n;++o)r+=String.fromCharCode(127&e[o]);return r}function R(e,t,n){var r="";n=Math.min(e.length,n);for(var o=t;o<n;++o)r+=String.fromCharCode(e[o]);return r}function j(e,t,n){var r=e.length;(!t||t<0)&&(t=0),(!n||n<0||n>r)&&(n=r);for(var o="",i=t;i<n;++i)o+=F(e[i]);return o}function P(e,t,n){for(var r=e.slice(t,n),o="",i=0;i<r.length;i+=2)o+=String.fromCharCode(r[i]+256*r[i+1]);return o}function C(e,t,n){if(e%1!=0||e<0)throw new RangeError("offset is not uint");if(e+t>n)throw new RangeError("Trying to access beyond buffer length")}function M(e,t,n,r,o,i){if(!s.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||t<i)throw new RangeError('"value" argument is out of bounds');if(n+r>e.length)throw new RangeError("Index out of range")}function I(e,t,n,r){t<0&&(t=65535+t+1);for(var o=0,i=Math.min(e.length-n,2);o<i;++o)e[n+o]=(t&255<<8*(r?o:1-o))>>>8*(r?o:1-o)}function A(e,t,n,r){t<0&&(t=4294967295+t+1);for(var o=0,i=Math.min(e.length-n,4);o<i;++o)e[n+o]=t>>>8*(r?o:3-o)&255}function D(e,t,n,r,o,i){if(n+r>e.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of range")}function N(e,t,n,r,i){return i||D(e,0,n,4),o.write(e,t,n,r,23,4),n+4}function L(e,t,n,r,i){return i||D(e,0,n,8),o.write(e,t,n,r,52,8),n+8}s.prototype.slice=function(e,t){var n,r=this.length;if(e=~~e,t=void 0===t?r:~~t,e<0?(e+=r)<0&&(e=0):e>r&&(e=r),t<0?(t+=r)<0&&(t=0):t>r&&(t=r),t<e&&(t=e),s.TYPED_ARRAY_SUPPORT)(n=this.subarray(e,t)).__proto__=s.prototype;else{var o=t-e;n=new s(o,void 0);for(var i=0;i<o;++i)n[i]=this[i+e]}return n},s.prototype.readUIntLE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=this[e],o=1,i=0;++i<t&&(o*=256);)r+=this[e+i]*o;return 
r},s.prototype.readUIntBE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=this[e+--t],o=1;t>0&&(o*=256);)r+=this[e+--t]*o;return r},s.prototype.readUInt8=function(e,t){return t||C(e,1,this.length),this[e]},s.prototype.readUInt16LE=function(e,t){return t||C(e,2,this.length),this[e]|this[e+1]<<8},s.prototype.readUInt16BE=function(e,t){return t||C(e,2,this.length),this[e]<<8|this[e+1]},s.prototype.readUInt32LE=function(e,t){return t||C(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},s.prototype.readUInt32BE=function(e,t){return t||C(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},s.prototype.readIntLE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=this[e],o=1,i=0;++i<t&&(o*=256);)r+=this[e+i]*o;return r>=(o*=128)&&(r-=Math.pow(2,8*t)),r},s.prototype.readIntBE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=t,o=1,i=this[e+--r];r>0&&(o*=256);)i+=this[e+--r]*o;return i>=(o*=128)&&(i-=Math.pow(2,8*t)),i},s.prototype.readInt8=function(e,t){return t||C(e,1,this.length),128&this[e]?-1*(255-this[e]+1):this[e]},s.prototype.readInt16LE=function(e,t){t||C(e,2,this.length);var n=this[e]|this[e+1]<<8;return 32768&n?4294901760|n:n},s.prototype.readInt16BE=function(e,t){t||C(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},s.prototype.readInt32LE=function(e,t){return t||C(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},s.prototype.readInt32BE=function(e,t){return t||C(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},s.prototype.readFloatLE=function(e,t){return t||C(e,4,this.length),o.read(this,e,!0,23,4)},s.prototype.readFloatBE=function(e,t){return t||C(e,4,this.length),o.read(this,e,!1,23,4)},s.prototype.readDoubleLE=function(e,t){return t||C(e,8,this.length),o.read(this,e,!0,52,8)},s.prototype.readDoubleBE=function(e,t){return t||C(e,8,this.length),o.read(this,e,!1,52,8)},s.prototype.writeUIntLE=function(e,t,n,r){(e=+e,t|=0,n|=0,r)||M(this,e,t,n,Math.pow(2,8*n)-1,0);var o=1,i=0;for(this[t]=255&e;++i<n&&(o*=256);)this[t+i]=e/o&255;return t+n},s.prototype.writeUIntBE=function(e,t,n,r){(e=+e,t|=0,n|=0,r)||M(this,e,t,n,Math.pow(2,8*n)-1,0);var o=n-1,i=1;for(this[t+o]=255&e;--o>=0&&(i*=256);)this[t+o]=e/i&255;return t+n},s.prototype.writeUInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,255,0),s.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),this[t]=255&e,t+1},s.prototype.writeUInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),s.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):I(this,e,t,!0),t+2},s.prototype.writeUInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):I(this,e,t,!1),t+2},s.prototype.writeUInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),s.TYPED_ARRAY_SUPPORT?(this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e):A(this,e,t,!0),t+4},s.prototype.writeUInt32BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):A(this,e,t,!1),t+4},s.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=0,a=1,u=0;for(this[t]=255&e;++i<n&&(a*=256);)e<0&&0===u&&0!==this[t+i-1]&&(u=1),this[t+i]=(e/a>>0)-u&255;return t+n},s.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var 
i=n-1,a=1,u=0;for(this[t+i]=255&e;--i>=0&&(a*=256);)e<0&&0===u&&0!==this[t+i+1]&&(u=1),this[t+i]=(e/a>>0)-u&255;return t+n},s.prototype.writeInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,127,-128),s.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),e<0&&(e=255+e+1),this[t]=255&e,t+1},s.prototype.writeInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),s.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):I(this,e,t,!0),t+2},s.prototype.writeInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):I(this,e,t,!1),t+2},s.prototype.writeInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),s.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24):A(this,e,t,!0),t+4},s.prototype.writeInt32BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):A(this,e,t,!1),t+4},s.prototype.writeFloatLE=function(e,t,n){return N(this,e,t,!0,n)},s.prototype.writeFloatBE=function(e,t,n){return N(this,e,t,!1,n)},s.prototype.writeDoubleLE=function(e,t,n){return L(this,e,t,!0,n)},s.prototype.writeDoubleBE=function(e,t,n){return L(this,e,t,!1,n)},s.prototype.copy=function(e,t,n,r){if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r<n&&(r=n),r===n)return 0;if(0===e.length||0===this.length)return 0;if(t<0)throw new RangeError("targetStart out of bounds");if(n<0||n>=this.length)throw new RangeError("sourceStart out of bounds");if(r<0)throw new RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),e.length-t<r-n&&(r=e.length-t+n);var o,i=r-n;if(this===e&&n<t&&t<r)for(o=i-1;o>=0;--o)e[o+t]=this[o+n];else if(i<1e3||!s.TYPED_ARRAY_SUPPORT)for(o=0;o<i;++o)e[o+t]=this[o+n];else Uint8Array.prototype.set.call(e,this.subarray(n,n+i),t);return i},s.prototype.fill=function(e,t,n,r){if("string"==typeof e){if("string"==typeof t?(r=t,t=0,n=this.length):"string"==typeof n&&(r=n,n=this.length),1===e.length){var o=e.charCodeAt(0);o<256&&(e=o)}if(void 0!==r&&"string"!=typeof r)throw new TypeError("encoding must be a string");if("string"==typeof r&&!s.isEncoding(r))throw new TypeError("Unknown encoding: "+r)}else"number"==typeof e&&(e&=255);if(t<0||this.length<t||this.length<n)throw new RangeError("Out of range index");if(n<=t)return this;var i;if(t>>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"==typeof e)for(i=t;i<n;++i)this[i]=e;else{var a=s.isBuffer(e)?e:B(new s(e,r).toString()),u=a.length;for(i=0;i<n-t;++i)this[i+t]=a[i%u]}return this};var U=/[^+\/0-9A-Za-z-_]/g;function F(e){return e<16?"0"+e.toString(16):e.toString(16)}function B(e,t){var n;t=t||1/0;for(var r=e.length,o=null,i=[],a=0;a<r;++a){if((n=e.charCodeAt(a))>55295&&n<57344){if(!o){if(n>56319){(t-=3)>-1&&i.push(239,191,189);continue}if(a+1===r){(t-=3)>-1&&i.push(239,191,189);continue}o=n;continue}if(n<56320){(t-=3)>-1&&i.push(239,191,189),o=n;continue}n=65536+(o-55296<<10|n-56320)}else o&&(t-=3)>-1&&i.push(239,191,189);if(o=null,n<128){if((t-=1)<0)break;i.push(n)}else if(n<2048){if((t-=2)<0)break;i.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;i.push(n>>12|224,n>>6&63|128,63&n|128)}else{if(!(n<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;i.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}}return i}function W(e){return r.toByteArray(function(e){if((e=function(e){return 
e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}(e).replace(U,"")).length<2)return"";for(;e.length%4!=0;)e+="=";return e}(e))}function Y(e,t,n,r){for(var o=0;o<r&&!(o+n>=t.length||o>=e.length);++o)t[o+n]=e[o];return o}}).call(t,n(3))},function(e,t,n){"use strict";var r=n(70);function o(e,t){e.emit("error",t)}e.exports={destroy:function(e,t){var n=this,i=this._readableState&&this._readableState.destroyed,a=this._writableState&&this._writableState.destroyed;return i||a?(t?t(e):!e||this._writableState&&this._writableState.errorEmitted||r.nextTick(o,this,e),this):(this._readableState&&(this._readableState.destroyed=!0),this._writableState&&(this._writableState.destroyed=!0),this._destroy(e||null,function(e){!t&&e?(r.nextTick(o,n,e),n._writableState&&(n._writableState.errorEmitted=!0)):t&&t(e)}),this)},undestroy:function(){this._readableState&&(this._readableState.destroyed=!1,this._readableState.reading=!1,this._readableState.ended=!1,this._readableState.endEmitted=!1),this._writableState&&(this._writableState.destroyed=!1,this._writableState.ended=!1,this._writableState.ending=!1,this._writableState.finished=!1,this._writableState.errorEmitted=!1)}}},function(e,t,n){"use strict";var r=n(71).Buffer,o=r.isEncoding||function(e){switch((e=""+e)&&e.toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":case"raw":return!0;default:return!1}};function i(e){var t;switch(this.encoding=function(e){var t=function(e){if(!e)return"utf8";for(var t;;)switch(e){case"utf8":case"utf-8":return"utf8";case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return"utf16le";case"latin1":case"binary":return"latin1";case"base64":case"ascii":case"hex":return e;default:if(t)return;e=(""+e).toLowerCase(),t=!0}}(e);if("string"!=typeof t&&(r.isEncoding===o||!o(e)))throw new Error("Unknown encoding: "+e);return t||e}(e),this.encoding){case"utf16le":this.text=s,this.end=c,t=4;break;case"utf8":this.fillLast=u,t=4;break;case"base64":this.text=l,this.end=f,t=3;break;default:return this.write=d,void(this.end=p)}this.lastNeed=0,this.lastTotal=0,this.lastChar=r.allocUnsafe(t)}function a(e){return e<=127?0:e>>5==6?2:e>>4==14?3:e>>3==30?4:e>>6==2?-1:-2}function u(e){var t=this.lastTotal-this.lastNeed,n=function(e,t,n){if(128!=(192&t[0]))return e.lastNeed=0,"�";if(e.lastNeed>1&&t.length>1){if(128!=(192&t[1]))return e.lastNeed=1,"�";if(e.lastNeed>2&&t.length>2&&128!=(192&t[2]))return e.lastNeed=2,"�"}}(this,e);return void 0!==n?n:this.lastNeed<=e.length?(e.copy(this.lastChar,t,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal)):(e.copy(this.lastChar,t,0,e.length),void(this.lastNeed-=e.length))}function s(e,t){if((e.length-t)%2==0){var n=e.toString("utf16le",t);if(n){var r=n.charCodeAt(n.length-1);if(r>=55296&&r<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=e[e.length-2],this.lastChar[1]=e[e.length-1],n.slice(0,-1)}return n}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=e[e.length-1],e.toString("utf16le",t,e.length-1)}function c(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed){var n=this.lastTotal-this.lastNeed;return t+this.lastChar.toString("utf16le",0,n)}return t}function l(e,t){var n=(e.length-t)%3;return 0===n?e.toString("base64",t):(this.lastNeed=3-n,this.lastTotal=3,1===n?this.lastChar[0]=e[e.length-1]:(this.lastChar[0]=e[e.length-2],this.lastChar[1]=e[e.length-1]),e.toString("base64",t,e.length-n))}function f(e){var t=e&&e.length?this.write(e):"";return 
this.lastNeed?t+this.lastChar.toString("base64",0,3-this.lastNeed):t}function d(e){return e.toString(this.encoding)}function p(e){return e&&e.length?this.write(e):""}t.StringDecoder=i,i.prototype.write=function(e){if(0===e.length)return"";var t,n;if(this.lastNeed){if(void 0===(t=this.fillLast(e)))return"";n=this.lastNeed,this.lastNeed=0}else n=0;return n<e.length?t?t+this.text(e,n):this.text(e,n):t||""},i.prototype.end=function(e){var t=e&&e.length?this.write(e):"";return this.lastNeed?t+"�":t},i.prototype.text=function(e,t){var n=function(e,t,n){var r=t.length-1;if(r<n)return 0;var o=a(t[r]);if(o>=0)return o>0&&(e.lastNeed=o-1),o;if(--r<n||-2===o)return 0;if((o=a(t[r]))>=0)return o>0&&(e.lastNeed=o-2),o;if(--r<n||-2===o)return 0;if((o=a(t[r]))>=0)return o>0&&(2===o?o=0:e.lastNeed=o-3),o;return 0}(this,e,t);if(!this.lastNeed)return e.toString("utf8",t);this.lastTotal=n;var r=e.length-(n-this.lastNeed);return e.copy(this.lastChar,0,r),e.toString("utf8",t,r)},i.prototype.fillLast=function(e){if(this.lastNeed<=e.length)return e.copy(this.lastChar,this.lastTotal-this.lastNeed,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);e.copy(this.lastChar,this.lastTotal-this.lastNeed,0,e.length),this.lastNeed-=e.length}},function(e,t,n){"use strict";e.exports=i;var r=n(33),o=n(54);function i(e){if(!(this instanceof i))return new i(e);r.call(this,e),this._transformState={afterTransform:function(e,t){var n=this._transformState;n.transforming=!1;var r=n.writecb;if(!r)return this.emit("error",new Error("write callback called multiple times"));n.writechunk=null,n.writecb=null,null!=t&&this.push(t),r(e);var o=this._readableState;o.reading=!1,(o.needReadable||o.length<o.highWaterMark)&&this._read(o.highWaterMark)}.bind(this),needTransform:!1,transforming:!1,writecb:null,writechunk:null,writeencoding:null},this._readableState.needReadable=!0,this._readableState.sync=!1,e&&("function"==typeof e.transform&&(this._transform=e.transform),"function"==typeof e.flush&&(this._flush=e.flush)),this.on("prefinish",a)}function a(){var e=this;"function"==typeof this._flush?this._flush(function(t,n){u(e,t,n)}):u(this,null,null)}function u(e,t,n){if(t)return e.emit("error",t);if(null!=n&&e.push(n),e._writableState.length)throw new Error("Calling transform done when ws.length != 0");if(e._transformState.transforming)throw new Error("Calling transform done when still transforming");return e.push(null)}o.inherits=n(32),o.inherits(i,r),i.prototype.push=function(e,t){return this._transformState.needTransform=!1,r.prototype.push.call(this,e,t)},i.prototype._transform=function(e,t,n){throw new Error("_transform() is not implemented")},i.prototype._write=function(e,t,n){var r=this._transformState;if(r.writecb=n,r.writechunk=e,r.writeencoding=t,!r.transforming){var o=this._readableState;(r.needTransform||o.needReadable||o.length<o.highWaterMark)&&this._read(o.highWaterMark)}},i.prototype._read=function(e){var t=this._transformState;null!==t.writechunk&&t.writecb&&!t.transforming?(t.transforming=!0,this._transform(t.writechunk,t.writeencoding,t.afterTransform)):t.needTransform=!0},i.prototype._destroy=function(e,t){var n=this;r.prototype._destroy.call(this,e,function(e){t(e),n.emit("close")})}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Sugarcube=t.Places365=t.Coco=t.KeyframeStatus=t.KeyframeSingle=t.KeyframeList=t.Summary=t.MediaInfo=t.MediaRecord=t.Heading=void 0;var 
r=p(n(502)),o=p(n(504)),i=p(n(505)),a=p(n(506)),u=p(n(507)),s=p(n(508)),c=p(n(511)),l=p(n(512)),f=p(n(513)),d=p(n(514));function p(e){return e&&e.__esModule?e:{default:e}}n(515),t.Heading=r.default,t.MediaRecord=i.default,t.MediaInfo=o.default,t.Summary=a.default,t.KeyframeList=u.default,t.KeyframeSingle=s.default,t.KeyframeStatus=c.default,t.Coco=l.default,t.Places365=f.default,t.Sugarcube=d.default},function(e,t,n){"use strict";t.decode=t.parse=n(521),t.encode=t.stringify=n(522)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=g(n(4)),o=g(n(5)),i=g(n(6)),a=g(n(7)),u=g(n(8)),s=g(n(9)),c=n(1),l=g(c),f=(n(16),n(15)),d=n(2),p=g(n(523)),h=n(17),m=n(11),v=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21)),y=g(n(114));function g(e){return e&&e.__esModule?e:{default:e}}var _={dragging:!1,draggingBox:!1,bounds:null,mouseX:0,mouseY:0,box:{x:0,y:0,w:0,h:0}},b=function(e){function t(){(0,i.default)(this,t);var e=(0,u.default)(this,(t.__proto__||(0,o.default)(t)).call(this));return e.state=(0,r.default)({},_),e.handleMouseDown=e.handleMouseDown.bind(e),e.handleMouseDownOnBox=e.handleMouseDownOnBox.bind(e),e.handleMouseMove=e.handleMouseMove.bind(e),e.handleMouseUp=e.handleMouseUp.bind(e),e}return(0,s.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){document.body.addEventListener("mousemove",this.handleMouseMove),document.body.addEventListener("mouseup",this.handleMouseUp)}},{key:"componentDidUpdate",value:function(e){!this.state.bounds||this.props.query.query&&e.query.query&&this.props.query.query.url===e.query.query.url||this.setState((0,r.default)({},_))}},{key:"componentWillUnmount",value:function(){document.body.removeEventListener("mousemove",this.handleMouseMove),document.body.removeEventListener("mouseup",this.handleMouseUp)}},{key:"handleMouseDown",value:function(e){e.preventDefault();var t=this.imgRef.getBoundingClientRect(),n=e.pageX,r=e.pageY,o=n-t.left,i=r-t.top;this.setState({dragging:!0,bounds:t,mouseX:n,mouseY:r,box:{x:o,y:i,w:1,h:1}})}},{key:"handleMouseDownOnBox",value:function(e){var t=this.imgRef.getBoundingClientRect(),n=e.pageX,o=e.pageY;this.setState({draggingBox:!0,bounds:t,mouseX:n,mouseY:o,initialBox:(0,r.default)({},this.state.box),box:(0,r.default)({},this.state.box)})}},{key:"handleMouseMove",value:function(e){var t=this.state,n=t.dragging,r=t.draggingBox,o=t.bounds,i=t.mouseX,a=t.mouseY,u=t.initialBox,s=t.box;if(n){e.preventDefault();var c=s.x,l=s.y,f=(0,h.clamp)(e.pageX-i,0,o.width-c),d=(0,h.clamp)(e.pageY-a,0,o.height-l);this.setState({box:{x:c,y:l,w:f,h:d}})}else if(r){e.preventDefault();var p=u.x,m=u.y,v=u.w,y=u.h,g=e.pageX-i,_=e.pageY-a;this.setState({box:{x:(0,h.clamp)(p+g,0,o.width-v),y:(0,h.clamp)(m+_,0,o.height-y),w:v,h:y}})}}},{key:"handleMouseUp",value:function(e){var t=this,n=this.props.actions,o=this.state,i=o.dragging,a=o.draggingBox,u=o.bounds,s=o.box;if(i||a){e.preventDefault();var c=s.x,l=s.y,f=s.w,d=s.h,h=this.imgRef,m=document.createElement("canvas"),v=m.getContext("2d"),y=h.naturalWidth/u.width;if(m.width=f*y,m.height=d*y,f<10||d<10)this.setState({dragging:!1,draggingBox:!1,box:{x:0,y:0,w:0,h:0}});else{this.setState({dragging:!1,draggingBox:!1});var g=new Image,_=!1;g.onload=function(){if(!_){_=!0,g.onload=null,v.drawImage(g,Math.round(c*y),Math.round(l*y),Math.round(f*y),Math.round(d*y),0,0,m.width,m.height);var 
e=(0,p.default)(m.toDataURL("image/jpeg",.9));n.upload(e,(0,r.default)({},t.props.query.query,{crop:{x:c,y:l,w:f,h:d}}))}},g.crossOrigin="anonymous",g.src=h.src,g.complete&&g.onload()}}}},{key:"render",value:function(){var e=this,t=this.props.query.query,n=this.state.box,r=n.x,o=n.y,i=n.w,a=n.h;if(!t)return null;if(t.loading)return l.default.createElement("div",{className:"searchQuery column"},l.default.createElement("h2",null,"Loading results..."),l.default.createElement(m.Loader,null));var u=t.url;return u&&0===u.indexOf("static")&&(u="/search/"+u),l.default.createElement("div",{className:"searchQuery row"},l.default.createElement("div",{className:"searchBox"},l.default.createElement("img",{src:u,ref:function(t){return e.imgRef=t},onMouseDown:this.handleMouseDown,crossOrigin:"anonymous"}),!!i&&l.default.createElement("div",{className:"box",style:{left:r,top:o,width:i,height:a},onMouseDown:this.handleMouseDownOnBox})),l.default.createElement("div",null,l.default.createElement("h3",null,"Your Query"),l.default.createElement(y.default,{query:t})))}}]),t}(c.Component);t.default=(0,d.connect)(function(e){return{query:e.search.query,options:e.search.options}},function(e){return{actions:(0,f.bindActionCreators)((0,r.default)({},v),e)}})(b)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=f(n(4)),o=f(n(1)),i=n(16),a=n(15),u=n(2),s=(l(n(209)),n(11)),c=l(n(21));function l(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function f(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.withRouter)((0,u.connect)(function(e){return{query:e.search.query.query,results:e.search.query.results,options:e.search.options}},function(e){return{searchActions:(0,a.bindActionCreators)((0,r.default)({},c),e)}})(function(e){var t=e.query,n=e.results,r=e.options;return!t||t.reset||t.loading||!n?o.default.createElement("div",null):t.loading||n.length?o.default.createElement("div",{className:"searchResults"},o.default.createElement("div",{className:"searchResultsHeading row"},o.default.createElement("div",{className:"column"},o.default.createElement("h3",null,"Search Results"),o.default.createElement("small",{className:"subtitle"},"Searched 10,523,176 frames from 576,234 videos (took ",t.timing.toFixed(2)," ms)"))),o.default.createElement(s.Keyframes,{frames:n,showHash:!0,showTimestamp:r.groupByHash,showSearchButton:!0,showSaveButton:!0,groupByHash:r.groupByHash})):o.default.createElement("div",{className:"searchResults"},o.default.createElement("h3",null,"No results"))}))},function(e,t,n){"use strict";var r=c(n(1)),o=c(n(214)),i=n(218),a=n(2),u=c(n(242)),s=n(94);function c(e){return e&&e.__esModule?e:{default:e}}var l=document.createElement("div");document.body.appendChild(l),o.default.render(r.default.createElement(i.AppContainer,null,r.default.createElement(a.Provider,{store:s.store},r.default.createElement(u.default,{history:s.history}))),l)},function(e,t,n){"use strict"; -/** @license React v16.5.2 - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var r=n(115),o="function"==typeof Symbol&&Symbol.for,i=o?Symbol.for("react.element"):60103,a=o?Symbol.for("react.portal"):60106,u=o?Symbol.for("react.fragment"):60107,s=o?Symbol.for("react.strict_mode"):60108,c=o?Symbol.for("react.profiler"):60114,l=o?Symbol.for("react.provider"):60109,f=o?Symbol.for("react.context"):60110,d=o?Symbol.for("react.async_mode"):60111,p=o?Symbol.for("react.forward_ref"):60112;o&&Symbol.for("react.placeholder");var h="function"==typeof Symbol&&Symbol.iterator;function m(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,u){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,r,o,i,a,u],c=0;(e=Error(t.replace(/%s/g,function(){return s[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. ",n)}var v={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},y={};function g(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}function _(){}function b(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}g.prototype.isReactComponent={},g.prototype.setState=function(e,t){"object"!=typeof e&&"function"!=typeof e&&null!=e&&m("85"),this.updater.enqueueSetState(this,e,t,"setState")},g.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},_.prototype=g.prototype;var w=b.prototype=new _;w.constructor=b,r(w,g.prototype),w.isPureReactComponent=!0;var x={current:null,currentDispatcher:null},E=Object.prototype.hasOwnProperty,O={key:!0,ref:!0,__self:!0,__source:!0};function S(e,t,n){var r=void 0,o={},a=null,u=null;if(null!=t)for(r in void 0!==t.ref&&(u=t.ref),void 0!==t.key&&(a=""+t.key),t)E.call(t,r)&&!O.hasOwnProperty(r)&&(o[r]=t[r]);var s=arguments.length-2;if(1===s)o.children=n;else if(1<s){for(var c=Array(s),l=0;l<s;l++)c[l]=arguments[l+2];o.children=c}if(e&&e.defaultProps)for(r in s=e.defaultProps)void 0===o[r]&&(o[r]=s[r]);return{$$typeof:i,type:e,key:a,ref:u,props:o,_owner:x.current}}function T(e){return"object"==typeof e&&null!==e&&e.$$typeof===i}var k=/\/+/g,R=[];function j(e,t,n,r){if(R.length){var o=R.pop();return o.result=e,o.keyPrefix=t,o.func=n,o.context=r,o.count=0,o}return{result:e,keyPrefix:t,func:n,context:r,count:0}}function P(e){e.result=null,e.keyPrefix=null,e.func=null,e.context=null,e.count=0,10>R.length&&R.push(e)}function C(e,t,n){return null==e?0:function e(t,n,r,o){var u=typeof t;"undefined"!==u&&"boolean"!==u||(t=null);var s=!1;if(null===t)s=!0;else switch(u){case"string":case"number":s=!0;break;case"object":switch(t.$$typeof){case i:case a:s=!0}}if(s)return r(o,t,""===n?"."+M(t,0):n),1;if(s=0,n=""===n?".":n+":",Array.isArray(t))for(var c=0;c<t.length;c++){var l=n+M(u=t[c],c);s+=e(u,l,r,o)}else if(l=null===t||"object"!=typeof t?null:"function"==typeof(l=h&&t[h]||t["@@iterator"])?l:null,"function"==typeof l)for(t=l.call(t),c=0;!(u=t.next()).done;)s+=e(u=u.value,l=n+M(u,c++),r,o);else"object"===u&&m("31","[object Object]"==(r=""+t)?"object with keys {"+Object.keys(t).join(", ")+"}":r,"");return s}(e,"",t,n)}function M(e,t){return"object"==typeof e&&null!==e&&null!=e.key?function(e){var 
t={"=":"=0",":":"=2"};return"$"+(""+e).replace(/[=:]/g,function(e){return t[e]})}(e.key):t.toString(36)}function I(e,t){e.func.call(e.context,t,e.count++)}function A(e,t,n){var r=e.result,o=e.keyPrefix;e=e.func.call(e.context,t,e.count++),Array.isArray(e)?D(e,r,n,function(e){return e}):null!=e&&(T(e)&&(e=function(e,t){return{$$typeof:i,type:e.type,key:t,ref:e.ref,props:e.props,_owner:e._owner}}(e,o+(!e.key||t&&t.key===e.key?"":(""+e.key).replace(k,"$&/")+"/")+n)),r.push(e))}function D(e,t,n,r,o){var i="";null!=n&&(i=(""+n).replace(k,"$&/")+"/"),C(e,A,t=j(t,i,r,o)),P(t)}var N={Children:{map:function(e,t,n){if(null==e)return e;var r=[];return D(e,r,null,t,n),r},forEach:function(e,t,n){if(null==e)return e;C(e,I,t=j(null,null,t,n)),P(t)},count:function(e){return C(e,function(){return null},null)},toArray:function(e){var t=[];return D(e,t,null,function(e){return e}),t},only:function(e){return T(e)||m("143"),e}},createRef:function(){return{current:null}},Component:g,PureComponent:b,createContext:function(e,t){return void 0===t&&(t=null),(e={$$typeof:f,_calculateChangedBits:t,_currentValue:e,_currentValue2:e,Provider:null,Consumer:null,unstable_read:null}).Provider={$$typeof:l,_context:e},e.Consumer=e,e.unstable_read=function(e,t){var n=x.currentDispatcher;return null===n&&m("277"),n.readContext(e,t)}.bind(null,e),e},forwardRef:function(e){return{$$typeof:p,render:e}},Fragment:u,StrictMode:s,unstable_AsyncMode:d,unstable_Profiler:c,createElement:S,cloneElement:function(e,t,n){(null===e||void 0===e)&&m("267",e);var o=void 0,a=r({},e.props),u=e.key,s=e.ref,c=e._owner;if(null!=t){void 0!==t.ref&&(s=t.ref,c=x.current),void 0!==t.key&&(u=""+t.key);var l=void 0;for(o in e.type&&e.type.defaultProps&&(l=e.type.defaultProps),t)E.call(t,o)&&!O.hasOwnProperty(o)&&(a[o]=void 0===t[o]&&void 0!==l?l[o]:t[o])}if(1===(o=arguments.length-2))a.children=n;else if(1<o){l=Array(o);for(var f=0;f<o;f++)l[f]=arguments[f+2];a.children=l}return{$$typeof:i,type:e.type,key:u,ref:s,props:a,_owner:c}},createFactory:function(e){var t=S.bind(null,e);return t.type=e,t},isValidElement:T,version:"16.5.2",__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{ReactCurrentOwner:x,assign:r}},L={default:N},U=L&&N||L;e.exports=U.default||U},function(e,t,n){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(e){console.error(e)}}(),e.exports=n(215)},function(e,t,n){"use strict"; -/** @license React v16.5.2 - * react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var r=n(1),o=n(115),i=n(216);function a(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,u){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,r,o,i,a,u],c=0;(e=Error(t.replace(/%s/g,function(){return s[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. 
",n)}r||a("227");var u=!1,s=null,c=!1,l=null,f={onError:function(e){u=!0,s=e}};function d(e,t,n,r,o,i,a,c,l){u=!1,s=null,function(e,t,n,r,o,i,a,u,s){var c=Array.prototype.slice.call(arguments,3);try{t.apply(n,c)}catch(e){this.onError(e)}}.apply(f,arguments)}var p=null,h={};function m(){if(p)for(var e in h){var t=h[e],n=p.indexOf(e);if(-1<n||a("96",e),!y[n])for(var r in t.extractEvents||a("97",e),y[n]=t,n=t.eventTypes){var o=void 0,i=n[r],u=t,s=r;g.hasOwnProperty(s)&&a("99",s),g[s]=i;var c=i.phasedRegistrationNames;if(c){for(o in c)c.hasOwnProperty(o)&&v(c[o],u,s);o=!0}else i.registrationName?(v(i.registrationName,u,s),o=!0):o=!1;o||a("98",r,e)}}}function v(e,t,n){_[e]&&a("100",e),_[e]=t,b[e]=t.eventTypes[n].dependencies}var y=[],g={},_={},b={},w=null,x=null,E=null;function O(e,t,n,r){t=e.type||"unknown-event",e.currentTarget=E(r),function(e,t,n,r,o,i,f,p,h){if(d.apply(this,arguments),u){if(u){var m=s;u=!1,s=null}else a("198"),m=void 0;c||(c=!0,l=m)}}(t,n,void 0,e),e.currentTarget=null}function S(e,t){return null==t&&a("30"),null==e?t:Array.isArray(e)?Array.isArray(t)?(e.push.apply(e,t),e):(e.push(t),e):Array.isArray(t)?[e].concat(t):[e,t]}function T(e,t,n){Array.isArray(e)?e.forEach(t,n):e&&t.call(n,e)}var k=null;function R(e,t){if(e){var n=e._dispatchListeners,r=e._dispatchInstances;if(Array.isArray(n))for(var o=0;o<n.length&&!e.isPropagationStopped();o++)O(e,t,n[o],r[o]);else n&&O(e,t,n,r);e._dispatchListeners=null,e._dispatchInstances=null,e.isPersistent()||e.constructor.release(e)}}function j(e){return R(e,!0)}function P(e){return R(e,!1)}var C={injectEventPluginOrder:function(e){p&&a("101"),p=Array.prototype.slice.call(e),m()},injectEventPluginsByName:function(e){var t,n=!1;for(t in e)if(e.hasOwnProperty(t)){var r=e[t];h.hasOwnProperty(t)&&h[t]===r||(h[t]&&a("102",t),h[t]=r,n=!0)}n&&m()}};function M(e,t){var n=e.stateNode;if(!n)return null;var r=w(n);if(!r)return null;n=r[t];e:switch(t){case"onClick":case"onClickCapture":case"onDoubleClick":case"onDoubleClickCapture":case"onMouseDown":case"onMouseDownCapture":case"onMouseMove":case"onMouseMoveCapture":case"onMouseUp":case"onMouseUpCapture":(r=!r.disabled)||(r=!("button"===(e=e.type)||"input"===e||"select"===e||"textarea"===e)),e=!r;break e;default:e=!1}return e?null:(n&&"function"!=typeof n&&a("231",t,typeof n),n)}function I(e,t){if(null!==e&&(k=S(k,e)),e=k,k=null,e&&(T(e,t?j:P),k&&a("95"),c))throw t=l,c=!1,l=null,t}var A=Math.random().toString(36).slice(2),D="__reactInternalInstance$"+A,N="__reactEventHandlers$"+A;function L(e){if(e[D])return e[D];for(;!e[D];){if(!e.parentNode)return null;e=e.parentNode}return 7===(e=e[D]).tag||8===e.tag?e:null}function U(e){return!(e=e[D])||7!==e.tag&&8!==e.tag?null:e}function F(e){if(7===e.tag||8===e.tag)return e.stateNode;a("33")}function B(e){return e[N]||null}function W(e){do{e=e.return}while(e&&7!==e.tag);return e||null}function Y(e,t,n){(t=M(e,n.dispatchConfig.phasedRegistrationNames[t]))&&(n._dispatchListeners=S(n._dispatchListeners,t),n._dispatchInstances=S(n._dispatchInstances,e))}function $(e){if(e&&e.dispatchConfig.phasedRegistrationNames){for(var t=e._targetInst,n=[];t;)n.push(t),t=W(t);for(t=n.length;0<t--;)Y(n[t],"captured",e);for(t=0;t<n.length;t++)Y(n[t],"bubbled",e)}}function q(e,t,n){e&&n&&n.dispatchConfig.registrationName&&(t=M(e,n.dispatchConfig.registrationName))&&(n._dispatchListeners=S(n._dispatchListeners,t),n._dispatchInstances=S(n._dispatchInstances,e))}function H(e){e&&e.dispatchConfig.registrationName&&q(e._targetInst,null,e)}function z(e){T(e,$)}var 
G=!("undefined"==typeof window||!window.document||!window.document.createElement);function V(e,t){var n={};return n[e.toLowerCase()]=t.toLowerCase(),n["Webkit"+e]="webkit"+t,n["Moz"+e]="moz"+t,n}var X={animationend:V("Animation","AnimationEnd"),animationiteration:V("Animation","AnimationIteration"),animationstart:V("Animation","AnimationStart"),transitionend:V("Transition","TransitionEnd")},K={},Q={};function J(e){if(K[e])return K[e];if(!X[e])return e;var t,n=X[e];for(t in n)if(n.hasOwnProperty(t)&&t in Q)return K[e]=n[t];return e}G&&(Q=document.createElement("div").style,"AnimationEvent"in window||(delete X.animationend.animation,delete X.animationiteration.animation,delete X.animationstart.animation),"TransitionEvent"in window||delete X.transitionend.transition);var Z=J("animationend"),ee=J("animationiteration"),te=J("animationstart"),ne=J("transitionend"),re="abort canplay canplaythrough durationchange emptied encrypted ended error loadeddata loadedmetadata loadstart pause play playing progress ratechange seeked seeking stalled suspend timeupdate volumechange waiting".split(" "),oe=null,ie=null,ae=null;function ue(){if(ae)return ae;var e,t,n=ie,r=n.length,o="value"in oe?oe.value:oe.textContent,i=o.length;for(e=0;e<r&&n[e]===o[e];e++);var a=r-e;for(t=1;t<=a&&n[r-t]===o[i-t];t++);return ae=o.slice(e,1<t?1-t:void 0)}function se(){return!0}function ce(){return!1}function le(e,t,n,r){for(var o in this.dispatchConfig=e,this._targetInst=t,this.nativeEvent=n,e=this.constructor.Interface)e.hasOwnProperty(o)&&((t=e[o])?this[o]=t(n):"target"===o?this.target=r:this[o]=n[o]);return this.isDefaultPrevented=(null!=n.defaultPrevented?n.defaultPrevented:!1===n.returnValue)?se:ce,this.isPropagationStopped=ce,this}function fe(e,t,n,r){if(this.eventPool.length){var o=this.eventPool.pop();return this.call(o,e,t,n,r),o}return new this(e,t,n,r)}function de(e){e instanceof this||a("279"),e.destructor(),10>this.eventPool.length&&this.eventPool.push(e)}function pe(e){e.eventPool=[],e.getPooled=fe,e.release=de}o(le.prototype,{preventDefault:function(){this.defaultPrevented=!0;var e=this.nativeEvent;e&&(e.preventDefault?e.preventDefault():"unknown"!=typeof e.returnValue&&(e.returnValue=!1),this.isDefaultPrevented=se)},stopPropagation:function(){var e=this.nativeEvent;e&&(e.stopPropagation?e.stopPropagation():"unknown"!=typeof e.cancelBubble&&(e.cancelBubble=!0),this.isPropagationStopped=se)},persist:function(){this.isPersistent=se},isPersistent:ce,destructor:function(){var e,t=this.constructor.Interface;for(e in t)this[e]=null;this.nativeEvent=this._targetInst=this.dispatchConfig=null,this.isPropagationStopped=this.isDefaultPrevented=ce,this._dispatchInstances=this._dispatchListeners=null}}),le.Interface={type:null,target:null,currentTarget:function(){return null},eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(e){return e.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null},le.extend=function(e){function t(){}function n(){return r.apply(this,arguments)}var r=this;t.prototype=r.prototype;var i=new t;return o(i,n.prototype),n.prototype=i,n.prototype.constructor=n,n.Interface=o({},r.Interface,e),n.extend=r.extend,pe(n),n},pe(le);var he=le.extend({data:null}),me=le.extend({data:null}),ve=[9,13,27,32],ye=G&&"CompositionEvent"in window,ge=null;G&&"documentMode"in document&&(ge=document.documentMode);var _e=G&&"TextEvent"in 
window&&!ge,be=G&&(!ye||ge&&8<ge&&11>=ge),we=String.fromCharCode(32),xe={beforeInput:{phasedRegistrationNames:{bubbled:"onBeforeInput",captured:"onBeforeInputCapture"},dependencies:["compositionend","keypress","textInput","paste"]},compositionEnd:{phasedRegistrationNames:{bubbled:"onCompositionEnd",captured:"onCompositionEndCapture"},dependencies:"blur compositionend keydown keypress keyup mousedown".split(" ")},compositionStart:{phasedRegistrationNames:{bubbled:"onCompositionStart",captured:"onCompositionStartCapture"},dependencies:"blur compositionstart keydown keypress keyup mousedown".split(" ")},compositionUpdate:{phasedRegistrationNames:{bubbled:"onCompositionUpdate",captured:"onCompositionUpdateCapture"},dependencies:"blur compositionupdate keydown keypress keyup mousedown".split(" ")}},Ee=!1;function Oe(e,t){switch(e){case"keyup":return-1!==ve.indexOf(t.keyCode);case"keydown":return 229!==t.keyCode;case"keypress":case"mousedown":case"blur":return!0;default:return!1}}function Se(e){return"object"==typeof(e=e.detail)&&"data"in e?e.data:null}var Te=!1;var ke={eventTypes:xe,extractEvents:function(e,t,n,r){var o=void 0,i=void 0;if(ye)e:{switch(e){case"compositionstart":o=xe.compositionStart;break e;case"compositionend":o=xe.compositionEnd;break e;case"compositionupdate":o=xe.compositionUpdate;break e}o=void 0}else Te?Oe(e,n)&&(o=xe.compositionEnd):"keydown"===e&&229===n.keyCode&&(o=xe.compositionStart);return o?(be&&"ko"!==n.locale&&(Te||o!==xe.compositionStart?o===xe.compositionEnd&&Te&&(i=ue()):(ie="value"in(oe=r)?oe.value:oe.textContent,Te=!0)),o=he.getPooled(o,t,n,r),i?o.data=i:null!==(i=Se(n))&&(o.data=i),z(o),i=o):i=null,(e=_e?function(e,t){switch(e){case"compositionend":return Se(t);case"keypress":return 32!==t.which?null:(Ee=!0,we);case"textInput":return(e=t.data)===we&&Ee?null:e;default:return null}}(e,n):function(e,t){if(Te)return"compositionend"===e||!ye&&Oe(e,t)?(e=ue(),ae=ie=oe=null,Te=!1,e):null;switch(e){case"paste":return null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1<t.char.length)return t.char;if(t.which)return String.fromCharCode(t.which)}return null;case"compositionend":return be&&"ko"!==t.locale?null:t.data;default:return null}}(e,n))?((t=me.getPooled(xe.beforeInput,t,n,r)).data=e,z(t)):t=null,null===i?t:null===t?i:[i,t]}},Re=null,je=null,Pe=null;function Ce(e){if(e=x(e)){"function"!=typeof Re&&a("280");var t=w(e.stateNode);Re(e.stateNode,e.type,t)}}function Me(e){je?Pe?Pe.push(e):Pe=[e]:je=e}function Ie(){if(je){var e=je,t=Pe;if(Pe=je=null,Ce(e),t)for(e=0;e<t.length;e++)Ce(t[e])}}function Ae(e,t){return e(t)}function De(e,t,n){return e(t,n)}function Ne(){}var Le=!1;function Ue(e,t){if(Le)return e(t);Le=!0;try{return Ae(e,t)}finally{Le=!1,(null!==je||null!==Pe)&&(Ne(),Ie())}}var Fe={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};function Be(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return"input"===t?!!Fe[e.type]:"textarea"===t}function We(e){return(e=e.target||e.srcElement||window).correspondingUseElement&&(e=e.correspondingUseElement),3===e.nodeType?e.parentNode:e}function Ye(e){if(!G)return!1;var t=(e="on"+e)in document;return t||((t=document.createElement("div")).setAttribute(e,"return;"),t="function"==typeof t[e]),t}function $e(e){var t=e.type;return(e=e.nodeName)&&"input"===e.toLowerCase()&&("checkbox"===t||"radio"===t)}function qe(e){e._valueTracker||(e._valueTracker=function(e){var 
t=$e(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&void 0!==n&&"function"==typeof n.get&&"function"==typeof n.set){var o=n.get,i=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return o.call(this)},set:function(e){r=""+e,i.call(this,e)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(e){r=""+e},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}(e))}function He(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=$e(e)?e.checked?"true":"false":e.value),(e=r)!==n&&(t.setValue(e),!0)}var ze=r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED,Ge=/^(.*)[\\\/]/,Ve="function"==typeof Symbol&&Symbol.for,Xe=Ve?Symbol.for("react.element"):60103,Ke=Ve?Symbol.for("react.portal"):60106,Qe=Ve?Symbol.for("react.fragment"):60107,Je=Ve?Symbol.for("react.strict_mode"):60108,Ze=Ve?Symbol.for("react.profiler"):60114,et=Ve?Symbol.for("react.provider"):60109,tt=Ve?Symbol.for("react.context"):60110,nt=Ve?Symbol.for("react.async_mode"):60111,rt=Ve?Symbol.for("react.forward_ref"):60112,ot=Ve?Symbol.for("react.placeholder"):60113,it="function"==typeof Symbol&&Symbol.iterator;function at(e){return null===e||"object"!=typeof e?null:"function"==typeof(e=it&&e[it]||e["@@iterator"])?e:null}function ut(e){if(null==e)return null;if("function"==typeof e)return e.displayName||e.name||null;if("string"==typeof e)return e;switch(e){case nt:return"AsyncMode";case Qe:return"Fragment";case Ke:return"Portal";case Ze:return"Profiler";case Je:return"StrictMode";case ot:return"Placeholder"}if("object"==typeof e){switch(e.$$typeof){case tt:return"Context.Consumer";case et:return"Context.Provider";case rt:var t=e.render;return t=t.displayName||t.name||"",e.displayName||(""!==t?"ForwardRef("+t+")":"ForwardRef")}if("function"==typeof e.then&&(e=1===e._reactStatus?e._reactResult:null))return ut(e)}return null}function st(e){var t="";do{e:switch(e.tag){case 4:case 0:case 1:case 2:case 3:case 7:case 10:var n=e._debugOwner,r=e._debugSource,o=ut(e.type),i=null;n&&(i=ut(n.type)),n=o,o="",r?o=" (at "+r.fileName.replace(Ge,"")+":"+r.lineNumber+")":i&&(o=" (created by "+i+")"),i="\n in "+(n||"Unknown")+o;break e;default:i=""}t+=i,e=e.return}while(e);return t}var ct=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,lt=Object.prototype.hasOwnProperty,ft={},dt={};function pt(e,t,n,r,o){this.acceptsBooleans=2===t||3===t||4===t,this.attributeName=r,this.attributeNamespace=o,this.mustUseProperty=n,this.propertyName=e,this.type=t}var ht={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ht[e]=new pt(e,0,!1,e,null)}),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];ht[t]=new pt(t,1,!1,e[1],null)}),["contentEditable","draggable","spellCheck","value"].forEach(function(e){ht[e]=new pt(e,2,!1,e.toLowerCase(),null)}),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ht[e]=new pt(e,2,!1,e,null)}),"allowFullScreen async autoFocus 
autoPlay controls default defer disabled formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ht[e]=new pt(e,3,!1,e.toLowerCase(),null)}),["checked","multiple","muted","selected"].forEach(function(e){ht[e]=new pt(e,3,!0,e,null)}),["capture","download"].forEach(function(e){ht[e]=new pt(e,4,!1,e,null)}),["cols","rows","size","span"].forEach(function(e){ht[e]=new pt(e,6,!1,e,null)}),["rowSpan","start"].forEach(function(e){ht[e]=new pt(e,5,!1,e.toLowerCase(),null)});var mt=/[\-:]([a-z])/g;function vt(e){return e[1].toUpperCase()}function yt(e,t,n,r){var o=ht.hasOwnProperty(t)?ht[t]:null;(null!==o?0===o.type:!r&&(2<t.length&&("o"===t[0]||"O"===t[0])&&("n"===t[1]||"N"===t[1])))||(function(e,t,n,r){if(null===t||void 0===t||function(e,t,n,r){if(null!==n&&0===n.type)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return!r&&(null!==n?!n.acceptsBooleans:"data-"!==(e=e.toLowerCase().slice(0,5))&&"aria-"!==e);default:return!1}}(e,t,n,r))return!0;if(r)return!1;if(null!==n)switch(n.type){case 3:return!t;case 4:return!1===t;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}(t,n,o,r)&&(n=null),r||null===o?function(e){return!!lt.call(dt,e)||!lt.call(ft,e)&&(ct.test(e)?dt[e]=!0:(ft[e]=!0,!1))}(t)&&(null===n?e.removeAttribute(t):e.setAttribute(t,""+n)):o.mustUseProperty?e[o.propertyName]=null===n?3!==o.type&&"":n:(t=o.attributeName,r=o.attributeNamespace,null===n?e.removeAttribute(t):(n=3===(o=o.type)||4===o&&!0===n?"":""+n,r?e.setAttributeNS(r,t,n):e.setAttribute(t,n))))}function gt(e){switch(typeof e){case"boolean":case"number":case"object":case"string":case"undefined":return e;default:return""}}function _t(e,t){var n=t.checked;return o({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:null!=n?n:e._wrapperState.initialChecked})}function bt(e,t){var n=null==t.defaultValue?"":t.defaultValue,r=null!=t.checked?t.checked:t.defaultChecked;n=gt(null!=t.value?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:"checkbox"===t.type||"radio"===t.type?null!=t.checked:null!=t.value}}function wt(e,t){null!=(t=t.checked)&&yt(e,"checked",t,!1)}function xt(e,t){wt(e,t);var n=gt(t.value),r=t.type;if(null!=n)"number"===r?(0===n&&""===e.value||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if("submit"===r||"reset"===r)return void e.removeAttribute("value");t.hasOwnProperty("value")?Ot(e,t.type,n):t.hasOwnProperty("defaultValue")&&Ot(e,t.type,gt(t.defaultValue)),null==t.checked&&null!=t.defaultChecked&&(e.defaultChecked=!!t.defaultChecked)}function Et(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!("submit"!==r&&"reset"!==r||void 0!==t.value&&null!==t.value))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}""!==(n=e.name)&&(e.name=""),e.defaultChecked=!e.defaultChecked,e.defaultChecked=!!e._wrapperState.initialChecked,""!==n&&(e.name=n)}function Ot(e,t,n){"number"===t&&e.ownerDocument.activeElement===e||(null==n?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name 
glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(mt,vt);ht[t]=new pt(t,1,!1,e,null)}),"xlink:actuate xlink:arcrole xlink:href xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(mt,vt);ht[t]=new pt(t,1,!1,e,"http://www.w3.org/1999/xlink")}),["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(mt,vt);ht[t]=new pt(t,1,!1,e,"http://www.w3.org/XML/1998/namespace")}),ht.tabIndex=new pt("tabIndex",1,!1,"tabindex",null);var St={change:{phasedRegistrationNames:{bubbled:"onChange",captured:"onChangeCapture"},dependencies:"blur change click focus input keydown keyup selectionchange".split(" ")}};function Tt(e,t,n){return(e=le.getPooled(St.change,e,t,n)).type="change",Me(n),z(e),e}var kt=null,Rt=null;function jt(e){I(e,!1)}function Pt(e){if(He(F(e)))return e}function Ct(e,t){if("change"===e)return t}var Mt=!1;function It(){kt&&(kt.detachEvent("onpropertychange",At),Rt=kt=null)}function At(e){"value"===e.propertyName&&Pt(Rt)&&Ue(jt,e=Tt(Rt,e,We(e)))}function Dt(e,t,n){"focus"===e?(It(),Rt=n,(kt=t).attachEvent("onpropertychange",At)):"blur"===e&&It()}function Nt(e){if("selectionchange"===e||"keyup"===e||"keydown"===e)return Pt(Rt)}function Lt(e,t){if("click"===e)return Pt(t)}function Ut(e,t){if("input"===e||"change"===e)return Pt(t)}G&&(Mt=Ye("input")&&(!document.documentMode||9<document.documentMode));var Ft={eventTypes:St,_isInputEventSupported:Mt,extractEvents:function(e,t,n,r){var o=t?F(t):window,i=void 0,a=void 0,u=o.nodeName&&o.nodeName.toLowerCase();if("select"===u||"input"===u&&"file"===o.type?i=Ct:Be(o)?Mt?i=Ut:(i=Nt,a=Dt):(u=o.nodeName)&&"input"===u.toLowerCase()&&("checkbox"===o.type||"radio"===o.type)&&(i=Lt),i&&(i=i(e,t)))return Tt(i,n,r);a&&a(e,o,t),"blur"===e&&(e=o._wrapperState)&&e.controlled&&"number"===o.type&&Ot(o,"number",o.value)}},Bt=le.extend({view:null,detail:null}),Wt={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};function Yt(e){var t=this.nativeEvent;return t.getModifierState?t.getModifierState(e):!!(e=Wt[e])&&!!t[e]}function $t(){return Yt}var qt=0,Ht=0,zt=!1,Gt=!1,Vt=Bt.extend({screenX:null,screenY:null,clientX:null,clientY:null,pageX:null,pageY:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,getModifierState:$t,button:null,buttons:null,relatedTarget:function(e){return e.relatedTarget||(e.fromElement===e.srcElement?e.toElement:e.fromElement)},movementX:function(e){if("movementX"in e)return e.movementX;var t=qt;return qt=e.screenX,zt?"mousemove"===e.type?e.screenX-t:0:(zt=!0,0)},movementY:function(e){if("movementY"in e)return e.movementY;var t=Ht;return 
Ht=e.screenY,Gt?"mousemove"===e.type?e.screenY-t:0:(Gt=!0,0)}}),Xt=Vt.extend({pointerId:null,width:null,height:null,pressure:null,tangentialPressure:null,tiltX:null,tiltY:null,twist:null,pointerType:null,isPrimary:null}),Kt={mouseEnter:{registrationName:"onMouseEnter",dependencies:["mouseout","mouseover"]},mouseLeave:{registrationName:"onMouseLeave",dependencies:["mouseout","mouseover"]},pointerEnter:{registrationName:"onPointerEnter",dependencies:["pointerout","pointerover"]},pointerLeave:{registrationName:"onPointerLeave",dependencies:["pointerout","pointerover"]}},Qt={eventTypes:Kt,extractEvents:function(e,t,n,r){var o="mouseover"===e||"pointerover"===e,i="mouseout"===e||"pointerout"===e;if(o&&(n.relatedTarget||n.fromElement)||!i&&!o)return null;if(o=r.window===r?r:(o=r.ownerDocument)?o.defaultView||o.parentWindow:window,i?(i=t,t=(t=n.relatedTarget||n.toElement)?L(t):null):i=null,i===t)return null;var a=void 0,u=void 0,s=void 0,c=void 0;"mouseout"===e||"mouseover"===e?(a=Vt,u=Kt.mouseLeave,s=Kt.mouseEnter,c="mouse"):"pointerout"!==e&&"pointerover"!==e||(a=Xt,u=Kt.pointerLeave,s=Kt.pointerEnter,c="pointer");var l=null==i?o:F(i);if(o=null==t?o:F(t),(e=a.getPooled(u,i,n,r)).type=c+"leave",e.target=l,e.relatedTarget=o,(n=a.getPooled(s,t,n,r)).type=c+"enter",n.target=o,n.relatedTarget=l,r=t,i&&r)e:{for(o=r,c=0,a=t=i;a;a=W(a))c++;for(a=0,s=o;s;s=W(s))a++;for(;0<c-a;)t=W(t),c--;for(;0<a-c;)o=W(o),a--;for(;c--;){if(t===o||t===o.alternate)break e;t=W(t),o=W(o)}t=null}else t=null;for(o=t,t=[];i&&i!==o&&(null===(c=i.alternate)||c!==o);)t.push(i),i=W(i);for(i=[];r&&r!==o&&(null===(c=r.alternate)||c!==o);)i.push(r),r=W(r);for(r=0;r<t.length;r++)q(t[r],"bubbled",e);for(r=i.length;0<r--;)q(i[r],"captured",n);return[e,n]}},Jt=Object.prototype.hasOwnProperty;function Zt(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}function en(e,t){if(Zt(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),r=Object.keys(t);if(n.length!==r.length)return!1;for(r=0;r<n.length;r++)if(!Jt.call(t,n[r])||!Zt(e[n[r]],t[n[r]]))return!1;return!0}function tn(e){var t=e;if(e.alternate)for(;t.return;)t=t.return;else{if(0!=(2&t.effectTag))return 1;for(;t.return;)if(0!=(2&(t=t.return).effectTag))return 1}return 5===t.tag?2:3}function nn(e){2!==tn(e)&&a("188")}function rn(e){if(!(e=function(e){var t=e.alternate;if(!t)return 3===(t=tn(e))&&a("188"),1===t?null:e;for(var n=e,r=t;;){var o=n.return,i=o?o.alternate:null;if(!o||!i)break;if(o.child===i.child){for(var u=o.child;u;){if(u===n)return nn(o),e;if(u===r)return nn(o),t;u=u.sibling}a("188")}if(n.return!==r.return)n=o,r=i;else{u=!1;for(var s=o.child;s;){if(s===n){u=!0,n=o,r=i;break}if(s===r){u=!0,r=o,n=i;break}s=s.sibling}if(!u){for(s=i.child;s;){if(s===n){u=!0,n=i,r=o;break}if(s===r){u=!0,r=i,n=o;break}s=s.sibling}u||a("189")}}n.alternate!==r&&a("190")}return 5!==n.tag&&a("188"),n.stateNode.current===n?e:t}(e)))return null;for(var t=e;;){if(7===t.tag||8===t.tag)return t;if(t.child)t.child.return=t,t=t.child;else{if(t===e)break;for(;!t.sibling;){if(!t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}}return null}var on=le.extend({animationName:null,elapsedTime:null,pseudoElement:null}),an=le.extend({clipboardData:function(e){return"clipboardData"in e?e.clipboardData:window.clipboardData}}),un=Bt.extend({relatedTarget:null});function sn(e){var t=e.keyCode;return"charCode"in e?0===(e=e.charCode)&&13===t&&(e=13):e=t,10===e&&(e=13),32<=e||13===e?e:0}var cn={Esc:"Escape",Spacebar:" 
",Left:"ArrowLeft",Up:"ArrowUp",Right:"ArrowRight",Down:"ArrowDown",Del:"Delete",Win:"OS",Menu:"ContextMenu",Apps:"ContextMenu",Scroll:"ScrollLock",MozPrintableKey:"Unidentified"},ln={8:"Backspace",9:"Tab",12:"Clear",13:"Enter",16:"Shift",17:"Control",18:"Alt",19:"Pause",20:"CapsLock",27:"Escape",32:" ",33:"PageUp",34:"PageDown",35:"End",36:"Home",37:"ArrowLeft",38:"ArrowUp",39:"ArrowRight",40:"ArrowDown",45:"Insert",46:"Delete",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"NumLock",145:"ScrollLock",224:"Meta"},fn=Bt.extend({key:function(e){if(e.key){var t=cn[e.key]||e.key;if("Unidentified"!==t)return t}return"keypress"===e.type?13===(e=sn(e))?"Enter":String.fromCharCode(e):"keydown"===e.type||"keyup"===e.type?ln[e.keyCode]||"Unidentified":""},location:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,repeat:null,locale:null,getModifierState:$t,charCode:function(e){return"keypress"===e.type?sn(e):0},keyCode:function(e){return"keydown"===e.type||"keyup"===e.type?e.keyCode:0},which:function(e){return"keypress"===e.type?sn(e):"keydown"===e.type||"keyup"===e.type?e.keyCode:0}}),dn=Vt.extend({dataTransfer:null}),pn=Bt.extend({touches:null,targetTouches:null,changedTouches:null,altKey:null,metaKey:null,ctrlKey:null,shiftKey:null,getModifierState:$t}),hn=le.extend({propertyName:null,elapsedTime:null,pseudoElement:null}),mn=Vt.extend({deltaX:function(e){return"deltaX"in e?e.deltaX:"wheelDeltaX"in e?-e.wheelDeltaX:0},deltaY:function(e){return"deltaY"in e?e.deltaY:"wheelDeltaY"in e?-e.wheelDeltaY:"wheelDelta"in e?-e.wheelDelta:0},deltaZ:null,deltaMode:null}),vn=[["abort","abort"],[Z,"animationEnd"],[ee,"animationIteration"],[te,"animationStart"],["canplay","canPlay"],["canplaythrough","canPlayThrough"],["drag","drag"],["dragenter","dragEnter"],["dragexit","dragExit"],["dragleave","dragLeave"],["dragover","dragOver"],["durationchange","durationChange"],["emptied","emptied"],["encrypted","encrypted"],["ended","ended"],["error","error"],["gotpointercapture","gotPointerCapture"],["load","load"],["loadeddata","loadedData"],["loadedmetadata","loadedMetadata"],["loadstart","loadStart"],["lostpointercapture","lostPointerCapture"],["mousemove","mouseMove"],["mouseout","mouseOut"],["mouseover","mouseOver"],["playing","playing"],["pointermove","pointerMove"],["pointerout","pointerOut"],["pointerover","pointerOver"],["progress","progress"],["scroll","scroll"],["seeking","seeking"],["stalled","stalled"],["suspend","suspend"],["timeupdate","timeUpdate"],["toggle","toggle"],["touchmove","touchMove"],[ne,"transitionEnd"],["waiting","waiting"],["wheel","wheel"]],yn={},gn={};function _n(e,t){var 
n=e[0],r="on"+((e=e[1])[0].toUpperCase()+e.slice(1));t={phasedRegistrationNames:{bubbled:r,captured:r+"Capture"},dependencies:[n],isInteractive:t},yn[e]=t,gn[n]=t}[["blur","blur"],["cancel","cancel"],["click","click"],["close","close"],["contextmenu","contextMenu"],["copy","copy"],["cut","cut"],["auxclick","auxClick"],["dblclick","doubleClick"],["dragend","dragEnd"],["dragstart","dragStart"],["drop","drop"],["focus","focus"],["input","input"],["invalid","invalid"],["keydown","keyDown"],["keypress","keyPress"],["keyup","keyUp"],["mousedown","mouseDown"],["mouseup","mouseUp"],["paste","paste"],["pause","pause"],["play","play"],["pointercancel","pointerCancel"],["pointerdown","pointerDown"],["pointerup","pointerUp"],["ratechange","rateChange"],["reset","reset"],["seeked","seeked"],["submit","submit"],["touchcancel","touchCancel"],["touchend","touchEnd"],["touchstart","touchStart"],["volumechange","volumeChange"]].forEach(function(e){_n(e,!0)}),vn.forEach(function(e){_n(e,!1)});var bn={eventTypes:yn,isInteractiveTopLevelEventType:function(e){return void 0!==(e=gn[e])&&!0===e.isInteractive},extractEvents:function(e,t,n,r){var o=gn[e];if(!o)return null;switch(e){case"keypress":if(0===sn(n))return null;case"keydown":case"keyup":e=fn;break;case"blur":case"focus":e=un;break;case"click":if(2===n.button)return null;case"auxclick":case"dblclick":case"mousedown":case"mousemove":case"mouseup":case"mouseout":case"mouseover":case"contextmenu":e=Vt;break;case"drag":case"dragend":case"dragenter":case"dragexit":case"dragleave":case"dragover":case"dragstart":case"drop":e=dn;break;case"touchcancel":case"touchend":case"touchmove":case"touchstart":e=pn;break;case Z:case ee:case te:e=on;break;case ne:e=hn;break;case"scroll":e=Bt;break;case"wheel":e=mn;break;case"copy":case"cut":case"paste":e=an;break;case"gotpointercapture":case"lostpointercapture":case"pointercancel":case"pointerdown":case"pointermove":case"pointerout":case"pointerover":case"pointerup":e=Xt;break;default:e=le}return z(t=e.getPooled(o,t,n,r)),t}},wn=bn.isInteractiveTopLevelEventType,xn=[];function En(e){var t=e.targetInst,n=t;do{if(!n){e.ancestors.push(n);break}var r;for(r=n;r.return;)r=r.return;if(!(r=5!==r.tag?null:r.stateNode.containerInfo))break;e.ancestors.push(n),n=L(r)}while(n);for(n=0;n<e.ancestors.length;n++){t=e.ancestors[n];var o=We(e.nativeEvent);r=e.topLevelType;for(var i=e.nativeEvent,a=null,u=0;u<y.length;u++){var s=y[u];s&&(s=s.extractEvents(r,t,i,o))&&(a=S(a,s))}I(a,!1)}}var On=!0;function Sn(e,t){if(!t)return null;var n=(wn(e)?kn:Rn).bind(null,e);t.addEventListener(e,n,!1)}function Tn(e,t){if(!t)return null;var n=(wn(e)?kn:Rn).bind(null,e);t.addEventListener(e,n,!0)}function kn(e,t){De(Rn,e,t)}function Rn(e,t){if(On){var n=We(t);if(null===(n=L(n))||"number"!=typeof n.tag||2===tn(n)||(n=null),xn.length){var r=xn.pop();r.topLevelType=e,r.nativeEvent=t,r.targetInst=n,e=r}else e={topLevelType:e,nativeEvent:t,targetInst:n,ancestors:[]};try{Ue(En,e)}finally{e.topLevelType=null,e.nativeEvent=null,e.targetInst=null,e.ancestors.length=0,10>xn.length&&xn.push(e)}}}var jn={},Pn=0,Cn="_reactListenersID"+(""+Math.random()).slice(2);function Mn(e){return Object.prototype.hasOwnProperty.call(e,Cn)||(e[Cn]=Pn++,jn[e[Cn]]={}),jn[e[Cn]]}function In(e){if(void 0===(e=e||("undefined"!=typeof document?document:void 0)))return null;try{return e.activeElement||e.body}catch(t){return e.body}}function An(e){for(;e&&e.firstChild;)e=e.firstChild;return e}function Dn(e,t){var 
n,r=An(e);for(e=0;r;){if(3===r.nodeType){if(n=e+r.textContent.length,e<=t&&n>=t)return{node:r,offset:t-e};e=n}e:{for(;r;){if(r.nextSibling){r=r.nextSibling;break e}r=r.parentNode}r=void 0}r=An(r)}}function Nn(){for(var e=window,t=In();t instanceof e.HTMLIFrameElement;){try{e=t.contentDocument.defaultView}catch(e){break}t=In(e.document)}return t}function Ln(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&("input"===t&&("text"===e.type||"search"===e.type||"tel"===e.type||"url"===e.type||"password"===e.type)||"textarea"===t||"true"===e.contentEditable)}var Un=G&&"documentMode"in document&&11>=document.documentMode,Fn={select:{phasedRegistrationNames:{bubbled:"onSelect",captured:"onSelectCapture"},dependencies:"blur contextmenu dragend focus keydown keyup mousedown mouseup selectionchange".split(" ")}},Bn=null,Wn=null,Yn=null,$n=!1;function qn(e,t){var n=t.window===t?t.document:9===t.nodeType?t:t.ownerDocument;return $n||null==Bn||Bn!==In(n)?null:("selectionStart"in(n=Bn)&&Ln(n)?n={start:n.selectionStart,end:n.selectionEnd}:n={anchorNode:(n=(n.ownerDocument&&n.ownerDocument.defaultView||window).getSelection()).anchorNode,anchorOffset:n.anchorOffset,focusNode:n.focusNode,focusOffset:n.focusOffset},Yn&&en(Yn,n)?null:(Yn=n,(e=le.getPooled(Fn.select,Wn,e,t)).type="select",e.target=Bn,z(e),e))}var Hn={eventTypes:Fn,extractEvents:function(e,t,n,r){var o,i=r.window===r?r.document:9===r.nodeType?r:r.ownerDocument;if(!(o=!i)){e:{i=Mn(i),o=b.onSelect;for(var a=0;a<o.length;a++){var u=o[a];if(!i.hasOwnProperty(u)||!i[u]){i=!1;break e}}i=!0}o=!i}if(o)return null;switch(i=t?F(t):window,e){case"focus":(Be(i)||"true"===i.contentEditable)&&(Bn=i,Wn=t,Yn=null);break;case"blur":Yn=Wn=Bn=null;break;case"mousedown":$n=!0;break;case"contextmenu":case"mouseup":case"dragend":return $n=!1,qn(n,r);case"selectionchange":if(Un)break;case"keydown":case"keyup":return qn(n,r)}return null}};function zn(e,t){return e=o({children:void 0},t),(t=function(e){var t="";return r.Children.forEach(e,function(e){null!=e&&(t+=e)}),t}(t.children))&&(e.children=t),e}function Gn(e,t,n,r){if(e=e.options,t){t={};for(var o=0;o<n.length;o++)t["$"+n[o]]=!0;for(n=0;n<e.length;n++)o=t.hasOwnProperty("$"+e[n].value),e[n].selected!==o&&(e[n].selected=o),o&&r&&(e[n].defaultSelected=!0)}else{for(n=""+gt(n),t=null,o=0;o<e.length;o++){if(e[o].value===n)return e[o].selected=!0,void(r&&(e[o].defaultSelected=!0));null!==t||e[o].disabled||(t=e[o])}null!==t&&(t.selected=!0)}}function Vn(e,t){return null!=t.dangerouslySetInnerHTML&&a("91"),o({},t,{value:void 0,defaultValue:void 0,children:""+e._wrapperState.initialValue})}function Xn(e,t){var n=t.value;null==n&&(n=t.defaultValue,null!=(t=t.children)&&(null!=n&&a("92"),Array.isArray(t)&&(1>=t.length||a("93"),t=t[0]),n=t),null==n&&(n="")),e._wrapperState={initialValue:gt(n)}}function Kn(e,t){var n=gt(t.value),r=gt(t.defaultValue);null!=n&&((n=""+n)!==e.value&&(e.value=n),null==t.defaultValue&&e.defaultValue!==n&&(e.defaultValue=n)),null!=r&&(e.defaultValue=""+r)}function Qn(e){var t=e.textContent;t===e._wrapperState.initialValue&&(e.value=t)}C.injectEventPluginOrder("ResponderEventPlugin SimpleEventPlugin EnterLeaveEventPlugin ChangeEventPlugin SelectEventPlugin BeforeInputEventPlugin".split(" ")),w=B,x=U,E=F,C.injectEventPluginsByName({SimpleEventPlugin:bn,EnterLeaveEventPlugin:Qt,ChangeEventPlugin:Ft,SelectEventPlugin:Hn,BeforeInputEventPlugin:ke});var Jn={html:"http://www.w3.org/1999/xhtml",mathml:"http://www.w3.org/1998/Math/MathML",svg:"http://www.w3.org/2000/svg"};function 
Zn(e){switch(e){case"svg":return"http://www.w3.org/2000/svg";case"math":return"http://www.w3.org/1998/Math/MathML";default:return"http://www.w3.org/1999/xhtml"}}function er(e,t){return null==e||"http://www.w3.org/1999/xhtml"===e?Zn(t):"http://www.w3.org/2000/svg"===e&&"foreignObject"===t?"http://www.w3.org/1999/xhtml":e}var tr=void 0,nr=function(e){return"undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction?function(t,n,r,o){MSApp.execUnsafeLocalFunction(function(){return e(t,n)})}:e}(function(e,t){if(e.namespaceURI!==Jn.svg||"innerHTML"in e)e.innerHTML=t;else{for((tr=tr||document.createElement("div")).innerHTML="<svg>"+t+"</svg>",t=tr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function rr(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&3===n.nodeType)return void(n.nodeValue=t)}e.textContent=t}var or={animationIterationCount:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},ir=["Webkit","ms","Moz","O"];function ar(e,t){for(var n in e=e.style,t)if(t.hasOwnProperty(n)){var r=0===n.indexOf("--"),o=n,i=t[n];o=null==i||"boolean"==typeof i||""===i?"":r||"number"!=typeof i||0===i||or.hasOwnProperty(o)&&or[o]?(""+i).trim():i+"px","float"===n&&(n="cssFloat"),r?e.setProperty(n,o):e[n]=o}}Object.keys(or).forEach(function(e){ir.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),or[t]=or[e]})});var ur=o({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function sr(e,t){t&&(ur[e]&&(null!=t.children||null!=t.dangerouslySetInnerHTML)&&a("137",e,""),null!=t.dangerouslySetInnerHTML&&(null!=t.children&&a("60"),"object"==typeof t.dangerouslySetInnerHTML&&"__html"in t.dangerouslySetInnerHTML||a("61")),null!=t.style&&"object"!=typeof t.style&&a("62",""))}function cr(e,t){if(-1===e.indexOf("-"))return"string"==typeof t.is;switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}function lr(e,t){var n=Mn(e=9===e.nodeType||11===e.nodeType?e:e.ownerDocument);t=b[t];for(var r=0;r<t.length;r++){var o=t[r];if(!n.hasOwnProperty(o)||!n[o]){switch(o){case"scroll":Tn("scroll",e);break;case"focus":case"blur":Tn("focus",e),Tn("blur",e),n.blur=!0,n.focus=!0;break;case"cancel":case"close":Ye(o)&&Tn(o,e);break;case"invalid":case"submit":case"reset":break;default:-1===re.indexOf(o)&&Sn(o,e)}n[o]=!0}}}function fr(){}var dr=null,pr=null;function hr(e,t){switch(e){case"button":case"input":case"select":case"textarea":return!!t.autoFocus}return!1}function mr(e,t){return"textarea"===e||"option"===e||"noscript"===e||"string"==typeof t.children||"number"==typeof t.children||"object"==typeof t.dangerouslySetInnerHTML&&null!==t.dangerouslySetInnerHTML&&null!=t.dangerouslySetInnerHTML.__html}function vr(e){for(e=e.nextSibling;e&&1!==e.nodeType&&3!==e.nodeType;)e=e.nextSibling;return e}function 
yr(e){for(e=e.firstChild;e&&1!==e.nodeType&&3!==e.nodeType;)e=e.nextSibling;return e}new Set;var gr=[],_r=-1;function br(e){0>_r||(e.current=gr[_r],gr[_r]=null,_r--)}function wr(e,t){gr[++_r]=e.current,e.current=t}var xr={},Er={current:xr},Or={current:!1},Sr=xr;function Tr(e,t){var n=e.type.contextTypes;if(!n)return xr;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var o,i={};for(o in n)i[o]=t[o];return r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=i),i}function kr(e){return null!==(e=e.childContextTypes)&&void 0!==e}function Rr(e){br(Or),br(Er)}function jr(e){br(Or),br(Er)}function Pr(e,t,n){Er.current!==xr&&a("168"),wr(Er,t),wr(Or,n)}function Cr(e,t,n){var r=e.stateNode;if(e=t.childContextTypes,"function"!=typeof r.getChildContext)return n;for(var i in r=r.getChildContext())i in e||a("108",ut(t)||"Unknown",i);return o({},n,r)}function Mr(e){var t=e.stateNode;return t=t&&t.__reactInternalMemoizedMergedChildContext||xr,Sr=Er.current,wr(Er,t),wr(Or,Or.current),!0}function Ir(e,t,n){var r=e.stateNode;r||a("169"),n?(t=Cr(e,t,Sr),r.__reactInternalMemoizedMergedChildContext=t,br(Or),br(Er),wr(Er,t)):br(Or),wr(Or,n)}var Ar=null,Dr=null;function Nr(e){return function(t){try{return e(t)}catch(e){}}}function Lr(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=null,this.index=0,this.ref=null,this.pendingProps=t,this.firstContextDependency=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.effectTag=0,this.lastEffect=this.firstEffect=this.nextEffect=null,this.childExpirationTime=this.expirationTime=0,this.alternate=null}function Ur(e){return!(!(e=e.prototype)||!e.isReactComponent)}function Fr(e,t,n){var r=e.alternate;return null===r?((r=new Lr(e.tag,t,e.key,e.mode)).type=e.type,r.stateNode=e.stateNode,r.alternate=e,e.alternate=r):(r.pendingProps=t,r.effectTag=0,r.nextEffect=null,r.firstEffect=null,r.lastEffect=null),r.childExpirationTime=e.childExpirationTime,r.expirationTime=t!==e.pendingProps?n:e.expirationTime,r.child=e.child,r.memoizedProps=e.memoizedProps,r.memoizedState=e.memoizedState,r.updateQueue=e.updateQueue,r.firstContextDependency=e.firstContextDependency,r.sibling=e.sibling,r.index=e.index,r.ref=e.ref,r}function Br(e,t,n){var r=e.type,o=e.key;e=e.props;var i=void 0;if("function"==typeof r)i=Ur(r)?2:4;else if("string"==typeof r)i=7;else e:switch(r){case Qe:return Wr(e.children,t,n,o);case nt:i=10,t|=3;break;case Je:i=10,t|=2;break;case Ze:return(r=new Lr(15,e,o,4|t)).type=Ze,r.expirationTime=n,r;case ot:i=16;break;default:if("object"==typeof r&&null!==r)switch(r.$$typeof){case et:i=12;break e;case tt:i=11;break e;case rt:i=13;break e;default:if("function"==typeof r.then){i=4;break e}}a("130",null==r?r:typeof r,"")}return(t=new Lr(i,e,o,t)).type=r,t.expirationTime=n,t}function Wr(e,t,n,r){return(e=new Lr(9,e,r,t)).expirationTime=n,e}function Yr(e,t,n){return(e=new Lr(8,e,null,t)).expirationTime=n,e}function $r(e,t,n){return(t=new Lr(6,null!==e.children?e.children:[],e.key,t)).expirationTime=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function qr(e,t){e.didError=!1;var n=e.earliestPendingTime;0===n?e.earliestPendingTime=e.latestPendingTime=t:n>t?e.earliestPendingTime=t:e.latestPendingTime<t&&(e.latestPendingTime=t),Hr(t,e)}function Hr(e,t){var 
n=t.earliestSuspendedTime,r=t.latestSuspendedTime,o=t.earliestPendingTime,i=t.latestPingedTime;0===(o=0!==o?o:i)&&(0===e||r>e)&&(o=r),0!==(e=o)&&0!==n&&n<e&&(e=n),t.nextExpirationTimeToWorkOn=o,t.expirationTime=e}var zr=!1;function Gr(e){return{baseState:e,firstUpdate:null,lastUpdate:null,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function Vr(e){return{baseState:e.baseState,firstUpdate:e.firstUpdate,lastUpdate:e.lastUpdate,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function Xr(e){return{expirationTime:e,tag:0,payload:null,callback:null,next:null,nextEffect:null}}function Kr(e,t){null===e.lastUpdate?e.firstUpdate=e.lastUpdate=t:(e.lastUpdate.next=t,e.lastUpdate=t)}function Qr(e,t){var n=e.alternate;if(null===n){var r=e.updateQueue,o=null;null===r&&(r=e.updateQueue=Gr(e.memoizedState))}else r=e.updateQueue,o=n.updateQueue,null===r?null===o?(r=e.updateQueue=Gr(e.memoizedState),o=n.updateQueue=Gr(n.memoizedState)):r=e.updateQueue=Vr(o):null===o&&(o=n.updateQueue=Vr(r));null===o||r===o?Kr(r,t):null===r.lastUpdate||null===o.lastUpdate?(Kr(r,t),Kr(o,t)):(Kr(r,t),o.lastUpdate=t)}function Jr(e,t){var n=e.updateQueue;null===(n=null===n?e.updateQueue=Gr(e.memoizedState):Zr(e,n)).lastCapturedUpdate?n.firstCapturedUpdate=n.lastCapturedUpdate=t:(n.lastCapturedUpdate.next=t,n.lastCapturedUpdate=t)}function Zr(e,t){var n=e.alternate;return null!==n&&t===n.updateQueue&&(t=e.updateQueue=Vr(t)),t}function eo(e,t,n,r,i,a){switch(n.tag){case 1:return"function"==typeof(e=n.payload)?e.call(a,r,i):e;case 3:e.effectTag=-1025&e.effectTag|64;case 0:if(null===(i="function"==typeof(e=n.payload)?e.call(a,r,i):e)||void 0===i)break;return o({},r,i);case 2:zr=!0}return r}function to(e,t,n,r,o){zr=!1;for(var i=(t=Zr(e,t)).baseState,a=null,u=0,s=t.firstUpdate,c=i;null!==s;){var l=s.expirationTime;l>o?(null===a&&(a=s,i=c),(0===u||u>l)&&(u=l)):(c=eo(e,0,s,c,n,r),null!==s.callback&&(e.effectTag|=32,s.nextEffect=null,null===t.lastEffect?t.firstEffect=t.lastEffect=s:(t.lastEffect.nextEffect=s,t.lastEffect=s))),s=s.next}for(l=null,s=t.firstCapturedUpdate;null!==s;){var f=s.expirationTime;f>o?(null===l&&(l=s,null===a&&(i=c)),(0===u||u>f)&&(u=f)):(c=eo(e,0,s,c,n,r),null!==s.callback&&(e.effectTag|=32,s.nextEffect=null,null===t.lastCapturedEffect?t.firstCapturedEffect=t.lastCapturedEffect=s:(t.lastCapturedEffect.nextEffect=s,t.lastCapturedEffect=s))),s=s.next}null===a&&(t.lastUpdate=null),null===l?t.lastCapturedUpdate=null:e.effectTag|=32,null===a&&null===l&&(i=c),t.baseState=i,t.firstUpdate=a,t.firstCapturedUpdate=l,e.expirationTime=u,e.memoizedState=c}function no(e,t,n){null!==t.firstCapturedUpdate&&(null!==t.lastUpdate&&(t.lastUpdate.next=t.firstCapturedUpdate,t.lastUpdate=t.lastCapturedUpdate),t.firstCapturedUpdate=t.lastCapturedUpdate=null),ro(t.firstEffect,n),t.firstEffect=t.lastEffect=null,ro(t.firstCapturedEffect,n),t.firstCapturedEffect=t.lastCapturedEffect=null}function ro(e,t){for(;null!==e;){var n=e.callback;if(null!==n){e.callback=null;var r=t;"function"!=typeof n&&a("191",n),n.call(r)}e=e.nextEffect}}function oo(e,t){return{value:e,source:t,stack:st(t)}}var io={current:null},ao=null,uo=null,so=null;function co(e,t){var n=e.type._context;wr(io,n._currentValue),n._currentValue=t}function lo(e){var t=io.current;br(io),e.type._context._currentValue=t}function fo(e){ao=e,so=uo=null,e.firstContextDependency=null}function po(e,t){return 
so!==e&&!1!==t&&0!==t&&("number"==typeof t&&1073741823!==t||(so=e,t=1073741823),t={context:e,observedBits:t,next:null},null===uo?(null===ao&&a("277"),ao.firstContextDependency=uo=t):uo=uo.next=t),e._currentValue}var ho={},mo={current:ho},vo={current:ho},yo={current:ho};function go(e){return e===ho&&a("174"),e}function _o(e,t){wr(yo,t),wr(vo,e),wr(mo,ho);var n=t.nodeType;switch(n){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:er(null,"");break;default:t=er(t=(n=8===n?t.parentNode:t).namespaceURI||null,n=n.tagName)}br(mo),wr(mo,t)}function bo(e){br(mo),br(vo),br(yo)}function wo(e){go(yo.current);var t=go(mo.current),n=er(t,e.type);t!==n&&(wr(vo,e),wr(mo,n))}function xo(e){vo.current===e&&(br(mo),br(vo))}var Eo=(new r.Component).refs;function Oo(e,t,n,r){n=null===(n=n(r,t=e.memoizedState))||void 0===n?t:o({},t,n),e.memoizedState=n,null!==(r=e.updateQueue)&&0===e.expirationTime&&(r.baseState=n)}var So={isMounted:function(e){return!!(e=e._reactInternalFiber)&&2===tn(e)},enqueueSetState:function(e,t,n){e=e._reactInternalFiber;var r=aa(),o=Xr(r=Mi(r,e));o.payload=t,void 0!==n&&null!==n&&(o.callback=n),Qr(e,o),Ii(e,r)},enqueueReplaceState:function(e,t,n){e=e._reactInternalFiber;var r=aa(),o=Xr(r=Mi(r,e));o.tag=1,o.payload=t,void 0!==n&&null!==n&&(o.callback=n),Qr(e,o),Ii(e,r)},enqueueForceUpdate:function(e,t){e=e._reactInternalFiber;var n=aa(),r=Xr(n=Mi(n,e));r.tag=2,void 0!==t&&null!==t&&(r.callback=t),Qr(e,r),Ii(e,n)}};function To(e,t,n,r,o,i,a){return"function"==typeof(e=e.stateNode).shouldComponentUpdate?e.shouldComponentUpdate(r,i,a):!t.prototype||!t.prototype.isPureReactComponent||(!en(n,r)||!en(o,i))}function ko(e,t,n,r){e=t.state,"function"==typeof t.componentWillReceiveProps&&t.componentWillReceiveProps(n,r),"function"==typeof t.UNSAFE_componentWillReceiveProps&&t.UNSAFE_componentWillReceiveProps(n,r),t.state!==e&&So.enqueueReplaceState(t,t.state,null)}function Ro(e,t,n,r){var o=e.stateNode,i=kr(t)?Sr:Er.current;o.props=n,o.state=e.memoizedState,o.refs=Eo,o.context=Tr(e,i),null!==(i=e.updateQueue)&&(to(e,i,n,o,r),o.state=e.memoizedState),"function"==typeof(i=t.getDerivedStateFromProps)&&(Oo(e,t,i,n),o.state=e.memoizedState),"function"==typeof t.getDerivedStateFromProps||"function"==typeof o.getSnapshotBeforeUpdate||"function"!=typeof o.UNSAFE_componentWillMount&&"function"!=typeof o.componentWillMount||(t=o.state,"function"==typeof o.componentWillMount&&o.componentWillMount(),"function"==typeof o.UNSAFE_componentWillMount&&o.UNSAFE_componentWillMount(),t!==o.state&&So.enqueueReplaceState(o,o.state,null),null!==(i=e.updateQueue)&&(to(e,i,n,o,r),o.state=e.memoizedState)),"function"==typeof o.componentDidMount&&(e.effectTag|=4)}var jo=Array.isArray;function Po(e,t,n){if(null!==(e=n.ref)&&"function"!=typeof e&&"object"!=typeof e){if(n._owner){var r=void 0;(n=n._owner)&&(2!==n.tag&&3!==n.tag&&a("110"),r=n.stateNode),r||a("147",e);var o=""+e;return null!==t&&null!==t.ref&&"function"==typeof t.ref&&t.ref._stringRef===o?t.ref:((t=function(e){var t=r.refs;t===Eo&&(t=r.refs={}),null===e?delete t[o]:t[o]=e})._stringRef=o,t)}"string"!=typeof e&&a("284"),n._owner||a("254",e)}return e}function Co(e,t){"textarea"!==e.type&&a("31","[object Object]"===Object.prototype.toString.call(t)?"object with keys {"+Object.keys(t).join(", ")+"}":t,"")}function Mo(e){function t(t,n){if(e){var r=t.lastEffect;null!==r?(r.nextEffect=n,t.lastEffect=n):t.firstEffect=t.lastEffect=n,n.nextEffect=null,n.effectTag=8}}function n(n,r){if(!e)return null;for(;null!==r;)t(n,r),r=r.sibling;return null}function 
r(e,t){for(e=new Map;null!==t;)null!==t.key?e.set(t.key,t):e.set(t.index,t),t=t.sibling;return e}function o(e,t,n){return(e=Fr(e,t,n)).index=0,e.sibling=null,e}function i(t,n,r){return t.index=r,e?null!==(r=t.alternate)?(r=r.index)<n?(t.effectTag=2,n):r:(t.effectTag=2,n):n}function u(t){return e&&null===t.alternate&&(t.effectTag=2),t}function s(e,t,n,r){return null===t||8!==t.tag?((t=Yr(n,e.mode,r)).return=e,t):((t=o(t,n,r)).return=e,t)}function c(e,t,n,r){return null!==t&&t.type===n.type?((r=o(t,n.props,r)).ref=Po(e,t,n),r.return=e,r):((r=Br(n,e.mode,r)).ref=Po(e,t,n),r.return=e,r)}function l(e,t,n,r){return null===t||6!==t.tag||t.stateNode.containerInfo!==n.containerInfo||t.stateNode.implementation!==n.implementation?((t=$r(n,e.mode,r)).return=e,t):((t=o(t,n.children||[],r)).return=e,t)}function f(e,t,n,r,i){return null===t||9!==t.tag?((t=Wr(n,e.mode,r,i)).return=e,t):((t=o(t,n,r)).return=e,t)}function d(e,t,n){if("string"==typeof t||"number"==typeof t)return(t=Yr(""+t,e.mode,n)).return=e,t;if("object"==typeof t&&null!==t){switch(t.$$typeof){case Xe:return(n=Br(t,e.mode,n)).ref=Po(e,null,t),n.return=e,n;case Ke:return(t=$r(t,e.mode,n)).return=e,t}if(jo(t)||at(t))return(t=Wr(t,e.mode,n,null)).return=e,t;Co(e,t)}return null}function p(e,t,n,r){var o=null!==t?t.key:null;if("string"==typeof n||"number"==typeof n)return null!==o?null:s(e,t,""+n,r);if("object"==typeof n&&null!==n){switch(n.$$typeof){case Xe:return n.key===o?n.type===Qe?f(e,t,n.props.children,r,o):c(e,t,n,r):null;case Ke:return n.key===o?l(e,t,n,r):null}if(jo(n)||at(n))return null!==o?null:f(e,t,n,r,null);Co(e,n)}return null}function h(e,t,n,r,o){if("string"==typeof r||"number"==typeof r)return s(t,e=e.get(n)||null,""+r,o);if("object"==typeof r&&null!==r){switch(r.$$typeof){case Xe:return e=e.get(null===r.key?n:r.key)||null,r.type===Qe?f(t,e,r.props.children,o,r.key):c(t,e,r,o);case Ke:return l(t,e=e.get(null===r.key?n:r.key)||null,r,o)}if(jo(r)||at(r))return f(t,e=e.get(n)||null,r,o,null);Co(t,r)}return null}function m(o,a,u,s){for(var c=null,l=null,f=a,m=a=0,v=null;null!==f&&m<u.length;m++){f.index>m?(v=f,f=null):v=f.sibling;var y=p(o,f,u[m],s);if(null===y){null===f&&(f=v);break}e&&f&&null===y.alternate&&t(o,f),a=i(y,a,m),null===l?c=y:l.sibling=y,l=y,f=v}if(m===u.length)return n(o,f),c;if(null===f){for(;m<u.length;m++)(f=d(o,u[m],s))&&(a=i(f,a,m),null===l?c=f:l.sibling=f,l=f);return c}for(f=r(o,f);m<u.length;m++)(v=h(f,o,m,u[m],s))&&(e&&null!==v.alternate&&f.delete(null===v.key?m:v.key),a=i(v,a,m),null===l?c=v:l.sibling=v,l=v);return e&&f.forEach(function(e){return t(o,e)}),c}function v(o,u,s,c){var l=at(s);"function"!=typeof l&&a("150"),null==(s=l.call(s))&&a("151");for(var f=l=null,m=u,v=u=0,y=null,g=s.next();null!==m&&!g.done;v++,g=s.next()){m.index>v?(y=m,m=null):y=m.sibling;var _=p(o,m,g.value,c);if(null===_){m||(m=y);break}e&&m&&null===_.alternate&&t(o,m),u=i(_,u,v),null===f?l=_:f.sibling=_,f=_,m=y}if(g.done)return n(o,m),l;if(null===m){for(;!g.done;v++,g=s.next())null!==(g=d(o,g.value,c))&&(u=i(g,u,v),null===f?l=g:f.sibling=g,f=g);return l}for(m=r(o,m);!g.done;v++,g=s.next())null!==(g=h(m,o,v,g.value,c))&&(e&&null!==g.alternate&&m.delete(null===g.key?v:g.key),u=i(g,u,v),null===f?l=g:f.sibling=g,f=g);return e&&m.forEach(function(e){return t(o,e)}),l}return function(e,r,i,s){var c="object"==typeof i&&null!==i&&i.type===Qe&&null===i.key;c&&(i=i.props.children);var l="object"==typeof i&&null!==i;if(l)switch(i.$$typeof){case 
Xe:e:{for(l=i.key,c=r;null!==c;){if(c.key===l){if(9===c.tag?i.type===Qe:c.type===i.type){n(e,c.sibling),(r=o(c,i.type===Qe?i.props.children:i.props,s)).ref=Po(e,c,i),r.return=e,e=r;break e}n(e,c);break}t(e,c),c=c.sibling}i.type===Qe?((r=Wr(i.props.children,e.mode,s,i.key)).return=e,e=r):((s=Br(i,e.mode,s)).ref=Po(e,r,i),s.return=e,e=s)}return u(e);case Ke:e:{for(c=i.key;null!==r;){if(r.key===c){if(6===r.tag&&r.stateNode.containerInfo===i.containerInfo&&r.stateNode.implementation===i.implementation){n(e,r.sibling),(r=o(r,i.children||[],s)).return=e,e=r;break e}n(e,r);break}t(e,r),r=r.sibling}(r=$r(i,e.mode,s)).return=e,e=r}return u(e)}if("string"==typeof i||"number"==typeof i)return i=""+i,null!==r&&8===r.tag?(n(e,r.sibling),(r=o(r,i,s)).return=e,e=r):(n(e,r),(r=Yr(i,e.mode,s)).return=e,e=r),u(e);if(jo(i))return m(e,r,i,s);if(at(i))return v(e,r,i,s);if(l&&Co(e,i),void 0===i&&!c)switch(e.tag){case 2:case 3:case 0:a("152",(s=e.type).displayName||s.name||"Component")}return n(e,r)}}var Io=Mo(!0),Ao=Mo(!1),Do=null,No=null,Lo=!1;function Uo(e,t){var n=new Lr(7,null,null,0);n.type="DELETED",n.stateNode=t,n.return=e,n.effectTag=8,null!==e.lastEffect?(e.lastEffect.nextEffect=n,e.lastEffect=n):e.firstEffect=e.lastEffect=n}function Fo(e,t){switch(e.tag){case 7:var n=e.type;return null!==(t=1!==t.nodeType||n.toLowerCase()!==t.nodeName.toLowerCase()?null:t)&&(e.stateNode=t,!0);case 8:return null!==(t=""===e.pendingProps||3!==t.nodeType?null:t)&&(e.stateNode=t,!0);default:return!1}}function Bo(e){if(Lo){var t=No;if(t){var n=t;if(!Fo(e,t)){if(!(t=vr(n))||!Fo(e,t))return e.effectTag|=2,Lo=!1,void(Do=e);Uo(Do,n)}Do=e,No=yr(t)}else e.effectTag|=2,Lo=!1,Do=e}}function Wo(e){for(e=e.return;null!==e&&7!==e.tag&&5!==e.tag;)e=e.return;Do=e}function Yo(e){if(e!==Do)return!1;if(!Lo)return Wo(e),Lo=!0,!1;var t=e.type;if(7!==e.tag||"head"!==t&&"body"!==t&&!mr(t,e.memoizedProps))for(t=No;t;)Uo(e,t),t=vr(t);return Wo(e),No=Do?vr(e.stateNode):null,!0}function $o(){No=Do=null,Lo=!1}var qo=ze.ReactCurrentOwner;function Ho(e,t,n,r){t.child=null===e?Ao(t,null,n,r):Io(t,e.child,n,r)}function zo(e,t,n,r,o){n=n.render;var i=t.ref;return Or.current||t.memoizedProps!==r||i!==(null!==e?e.ref:null)?(Ho(e,t,n=n(r,i),o),t.memoizedProps=r,t.child):Zo(e,t,o)}function Go(e,t){var n=t.ref;(null===e&&null!==n||null!==e&&e.ref!==n)&&(t.effectTag|=128)}function Vo(e,t,n,r,o){var i=kr(n)?Sr:Er.current;return i=Tr(t,i),fo(t),n=n(r,i),t.effectTag|=1,Ho(e,t,n,o),t.memoizedProps=r,t.child}function Xo(e,t,n,r,o){if(kr(n)){var i=!0;Mr(t)}else i=!1;if(fo(t),null===e)if(null===t.stateNode){var a=kr(n)?Sr:Er.current,u=n.contextTypes,s=null!==u&&void 0!==u,c=new n(r,u=s?Tr(t,a):xr);t.memoizedState=null!==c.state&&void 0!==c.state?c.state:null,c.updater=So,t.stateNode=c,c._reactInternalFiber=t,s&&((s=t.stateNode).__reactInternalMemoizedUnmaskedChildContext=a,s.__reactInternalMemoizedMaskedChildContext=u),Ro(t,n,r,o),r=!0}else{a=t.stateNode,u=t.memoizedProps,a.props=u;var l=a.context;s=Tr(t,s=kr(n)?Sr:Er.current);var f=n.getDerivedStateFromProps;(c="function"==typeof f||"function"==typeof a.getSnapshotBeforeUpdate)||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(u!==r||l!==s)&&ko(t,a,r,s),zr=!1;var d=t.memoizedState;l=a.state=d;var p=t.updateQueue;null!==p&&(to(t,p,r,a,o),l=t.memoizedState),u!==r||d!==l||Or.current||zr?("function"==typeof f&&(Oo(t,n,f,r),l=t.memoizedState),(u=zr||To(t,n,u,r,d,l,s))?(c||"function"!=typeof a.UNSAFE_componentWillMount&&"function"!=typeof 
a.componentWillMount||("function"==typeof a.componentWillMount&&a.componentWillMount(),"function"==typeof a.UNSAFE_componentWillMount&&a.UNSAFE_componentWillMount()),"function"==typeof a.componentDidMount&&(t.effectTag|=4)):("function"==typeof a.componentDidMount&&(t.effectTag|=4),t.memoizedProps=r,t.memoizedState=l),a.props=r,a.state=l,a.context=s,r=u):("function"==typeof a.componentDidMount&&(t.effectTag|=4),r=!1)}else a=t.stateNode,u=t.memoizedProps,a.props=u,l=a.context,s=Tr(t,s=kr(n)?Sr:Er.current),(c="function"==typeof(f=n.getDerivedStateFromProps)||"function"==typeof a.getSnapshotBeforeUpdate)||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(u!==r||l!==s)&&ko(t,a,r,s),zr=!1,l=t.memoizedState,d=a.state=l,null!==(p=t.updateQueue)&&(to(t,p,r,a,o),d=t.memoizedState),u!==r||l!==d||Or.current||zr?("function"==typeof f&&(Oo(t,n,f,r),d=t.memoizedState),(f=zr||To(t,n,u,r,l,d,s))?(c||"function"!=typeof a.UNSAFE_componentWillUpdate&&"function"!=typeof a.componentWillUpdate||("function"==typeof a.componentWillUpdate&&a.componentWillUpdate(r,d,s),"function"==typeof a.UNSAFE_componentWillUpdate&&a.UNSAFE_componentWillUpdate(r,d,s)),"function"==typeof a.componentDidUpdate&&(t.effectTag|=4),"function"==typeof a.getSnapshotBeforeUpdate&&(t.effectTag|=256)):("function"!=typeof a.componentDidUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=256),t.memoizedProps=r,t.memoizedState=d),a.props=r,a.state=d,a.context=s,r=f):("function"!=typeof a.componentDidUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=256),r=!1);return Ko(e,t,n,r,i,o)}function Ko(e,t,n,r,o,i){Go(e,t);var a=0!=(64&t.effectTag);if(!r&&!a)return o&&Ir(t,n,!1),Zo(e,t,i);r=t.stateNode,qo.current=t;var u=a?null:r.render();return t.effectTag|=1,null!==e&&a&&(Ho(e,t,null,i),t.child=null),Ho(e,t,u,i),t.memoizedState=r.state,t.memoizedProps=r.props,o&&Ir(t,n,!0),t.child}function Qo(e){var t=e.stateNode;t.pendingContext?Pr(0,t.pendingContext,t.pendingContext!==t.context):t.context&&Pr(0,t.context,!1),_o(e,t.containerInfo)}function Jo(e,t){if(e&&e.defaultProps)for(var n in t=o({},t),e=e.defaultProps)void 0===t[n]&&(t[n]=e[n]);return t}function Zo(e,t,n){null!==e&&(t.firstContextDependency=e.firstContextDependency);var r=t.childExpirationTime;if(0===r||r>n)return null;if(null!==e&&t.child!==e.child&&a("153"),null!==t.child){for(n=Fr(e=t.child,e.pendingProps,e.expirationTime),t.child=n,n.return=t;null!==e.sibling;)e=e.sibling,(n=n.sibling=Fr(e,e.pendingProps,e.expirationTime)).return=t;n.sibling=null}return t.child}function ei(e,t,n){var r=t.expirationTime;if(!Or.current&&(0===r||r>n)){switch(t.tag){case 5:Qo(t),$o();break;case 7:wo(t);break;case 2:kr(t.type)&&Mr(t);break;case 3:kr(t.type._reactResult)&&Mr(t);break;case 6:_o(t,t.stateNode.containerInfo);break;case 12:co(t,t.memoizedProps.value)}return Zo(e,t,n)}switch(t.expirationTime=0,t.tag){case 4:return function(e,t,n,r){null!==e&&a("155");var o=t.pendingProps;if("object"==typeof n&&null!==n&&"function"==typeof n.then){var i=n=function(e){switch(e._reactStatus){case 1:return e._reactResult;case 2:throw e._reactResult;case 0:throw e;default:throw e._reactStatus=0,e.then(function(t){if(0===e._reactStatus){if(e._reactStatus=1,"object"==typeof t&&null!==t){var n=t.default;t=void 
0!==n&&null!==n?n:t}e._reactResult=t}},function(t){0===e._reactStatus&&(e._reactStatus=2,e._reactResult=t)}),e}}(n);i="function"==typeof i?Ur(i)?3:1:void 0!==i&&null!==i&&i.$$typeof?14:4,i=t.tag=i;var u=Jo(n,o);switch(i){case 1:return Vo(e,t,n,u,r);case 3:return Xo(e,t,n,u,r);case 14:return zo(e,t,n,u,r);default:a("283",n)}}if(i=Tr(t,Er.current),fo(t),i=n(o,i),t.effectTag|=1,"object"==typeof i&&null!==i&&"function"==typeof i.render&&void 0===i.$$typeof){t.tag=2,kr(n)?(u=!0,Mr(t)):u=!1,t.memoizedState=null!==i.state&&void 0!==i.state?i.state:null;var s=n.getDerivedStateFromProps;return"function"==typeof s&&Oo(t,n,s,o),i.updater=So,t.stateNode=i,i._reactInternalFiber=t,Ro(t,n,o,r),Ko(e,t,n,!0,u,r)}return t.tag=0,Ho(e,t,i,r),t.memoizedProps=o,t.child}(e,t,t.type,n);case 0:return Vo(e,t,t.type,t.pendingProps,n);case 1:var o=t.type._reactResult;return e=Vo(e,t,o,Jo(o,r=t.pendingProps),n),t.memoizedProps=r,e;case 2:return Xo(e,t,t.type,t.pendingProps,n);case 3:return e=Xo(e,t,o=t.type._reactResult,Jo(o,r=t.pendingProps),n),t.memoizedProps=r,e;case 5:return Qo(t),null===(r=t.updateQueue)&&a("282"),o=null!==(o=t.memoizedState)?o.element:null,to(t,r,t.pendingProps,null,n),(r=t.memoizedState.element)===o?($o(),t=Zo(e,t,n)):(o=t.stateNode,(o=(null===e||null===e.child)&&o.hydrate)&&(No=yr(t.stateNode.containerInfo),Do=t,o=Lo=!0),o?(t.effectTag|=2,t.child=Ao(t,null,r,n)):(Ho(e,t,r,n),$o()),t=t.child),t;case 7:wo(t),null===e&&Bo(t),r=t.type,o=t.pendingProps;var i=null!==e?e.memoizedProps:null,u=o.children;return mr(r,o)?u=null:null!==i&&mr(r,i)&&(t.effectTag|=16),Go(e,t),1073741823!==n&&1&t.mode&&o.hidden?(t.expirationTime=1073741823,t.memoizedProps=o,t=null):(Ho(e,t,u,n),t.memoizedProps=o,t=t.child),t;case 8:return null===e&&Bo(t),t.memoizedProps=t.pendingProps,null;case 16:return null;case 6:return _o(t,t.stateNode.containerInfo),r=t.pendingProps,null===e?t.child=Io(t,null,r,n):Ho(e,t,r,n),t.memoizedProps=r,t.child;case 13:return zo(e,t,t.type,t.pendingProps,n);case 14:return e=zo(e,t,o=t.type._reactResult,Jo(o,r=t.pendingProps),n),t.memoizedProps=r,e;case 9:return Ho(e,t,r=t.pendingProps,n),t.memoizedProps=r,t.child;case 10:return Ho(e,t,r=t.pendingProps.children,n),t.memoizedProps=r,t.child;case 15:return Ho(e,t,(r=t.pendingProps).children,n),t.memoizedProps=r,t.child;case 12:e:{if(r=t.type._context,o=t.pendingProps,u=t.memoizedProps,i=o.value,t.memoizedProps=o,co(t,i),null!==u){var s=u.value;if(0===(i=s===i&&(0!==s||1/s==1/i)||s!=s&&i!=i?0:0|("function"==typeof r._calculateChangedBits?r._calculateChangedBits(s,i):1073741823))){if(u.children===o.children&&!Or.current){t=Zo(e,t,n);break e}}else for(null!==(u=t.child)&&(u.return=t);null!==u;){if(null!==(s=u.firstContextDependency))do{if(s.context===r&&0!=(s.observedBits&i)){if(2===u.tag||3===u.tag){var c=Xr(n);c.tag=2,Qr(u,c)}(0===u.expirationTime||u.expirationTime>n)&&(u.expirationTime=n),null!==(c=u.alternate)&&(0===c.expirationTime||c.expirationTime>n)&&(c.expirationTime=n);for(var l=u.return;null!==l;){if(c=l.alternate,0===l.childExpirationTime||l.childExpirationTime>n)l.childExpirationTime=n,null!==c&&(0===c.childExpirationTime||c.childExpirationTime>n)&&(c.childExpirationTime=n);else{if(null===c||!(0===c.childExpirationTime||c.childExpirationTime>n))break;c.childExpirationTime=n}l=l.return}}c=u.child,s=s.next}while(null!==s);else c=12===u.tag&&u.type===t.type?null:u.child;if(null!==c)c.return=u;else for(c=u;null!==c;){if(c===t){c=null;break}if(null!==(u=c.sibling)){u.return=c.return,c=u;break}c=c.return}u=c}}Ho(e,t,o.children,n),t=t.child}return 
t;case 11:return i=t.type,o=(r=t.pendingProps).children,fo(t),o=o(i=po(i,r.unstable_observedBits)),t.effectTag|=1,Ho(e,t,o,n),t.memoizedProps=r,t.child;default:a("156")}}function ti(e){e.effectTag|=4}var ni=void 0,ri=void 0,oi=void 0;function ii(e,t){var n=t.source,r=t.stack;null===r&&null!==n&&(r=st(n)),null!==n&&ut(n.type),t=t.value,null!==e&&2===e.tag&&ut(e.type);try{console.error(t)}catch(e){setTimeout(function(){throw e})}}function ai(e){var t=e.ref;if(null!==t)if("function"==typeof t)try{t(null)}catch(t){Ci(e,t)}else t.current=null}function ui(e){switch("function"==typeof Dr&&Dr(e),e.tag){case 2:case 3:ai(e);var t=e.stateNode;if("function"==typeof t.componentWillUnmount)try{t.props=e.memoizedProps,t.state=e.memoizedState,t.componentWillUnmount()}catch(t){Ci(e,t)}break;case 7:ai(e);break;case 6:li(e)}}function si(e){return 7===e.tag||5===e.tag||6===e.tag}function ci(e){e:{for(var t=e.return;null!==t;){if(si(t)){var n=t;break e}t=t.return}a("160"),n=void 0}var r=t=void 0;switch(n.tag){case 7:t=n.stateNode,r=!1;break;case 5:case 6:t=n.stateNode.containerInfo,r=!0;break;default:a("161")}16&n.effectTag&&(rr(t,""),n.effectTag&=-17);e:t:for(n=e;;){for(;null===n.sibling;){if(null===n.return||si(n.return)){n=null;break e}n=n.return}for(n.sibling.return=n.return,n=n.sibling;7!==n.tag&&8!==n.tag;){if(2&n.effectTag)continue t;if(null===n.child||6===n.tag)continue t;n.child.return=n,n=n.child}if(!(2&n.effectTag)){n=n.stateNode;break e}}for(var o=e;;){if(7===o.tag||8===o.tag)if(n)if(r){var i=t,u=o.stateNode,s=n;8===i.nodeType?i.parentNode.insertBefore(u,s):i.insertBefore(u,s)}else t.insertBefore(o.stateNode,n);else r?(i=t,u=o.stateNode,8===i.nodeType?(s=i.parentNode).insertBefore(u,i):(s=i).appendChild(u),null===s.onclick&&(s.onclick=fr)):t.appendChild(o.stateNode);else if(6!==o.tag&&null!==o.child){o.child.return=o,o=o.child;continue}if(o===e)break;for(;null===o.sibling;){if(null===o.return||o.return===e)return;o=o.return}o.sibling.return=o.return,o=o.sibling}}function li(e){for(var t=e,n=!1,r=void 0,o=void 0;;){if(!n){n=t.return;e:for(;;){switch(null===n&&a("160"),n.tag){case 7:r=n.stateNode,o=!1;break e;case 5:case 6:r=n.stateNode.containerInfo,o=!0;break e}n=n.return}n=!0}if(7===t.tag||8===t.tag){e:for(var i=t,u=i;;)if(ui(u),null!==u.child&&6!==u.tag)u.child.return=u,u=u.child;else{if(u===i)break;for(;null===u.sibling;){if(null===u.return||u.return===i)break e;u=u.return}u.sibling.return=u.return,u=u.sibling}o?(i=r,u=t.stateNode,8===i.nodeType?i.parentNode.removeChild(u):i.removeChild(u)):r.removeChild(t.stateNode)}else if(6===t.tag?(r=t.stateNode.containerInfo,o=!0):ui(t),null!==t.child){t.child.return=t,t=t.child;continue}if(t===e)break;for(;null===t.sibling;){if(null===t.return||t.return===e)return;6===(t=t.return).tag&&(n=!1)}t.sibling.return=t.return,t=t.sibling}}function fi(e,t){switch(t.tag){case 2:case 3:break;case 7:var n=t.stateNode;if(null!=n){var r=t.memoizedProps,o=null!==e?e.memoizedProps:r;e=t.type;var i=t.updateQueue;if(t.updateQueue=null,null!==i){for(n[N]=r,"input"===e&&"radio"===r.type&&null!=r.name&&wt(n,r),cr(e,o),t=cr(e,r),o=0;o<i.length;o+=2){var u=i[o],s=i[o+1];"style"===u?ar(n,s):"dangerouslySetInnerHTML"===u?nr(n,s):"children"===u?rr(n,s):yt(n,u,s,t)}switch(e){case"input":xt(n,r);break;case"textarea":Kn(n,r);break;case"select":e=n._wrapperState.wasMultiple,n._wrapperState.wasMultiple=!!r.multiple,null!=(i=r.value)?Gn(n,!!r.multiple,i,!1):e!==!!r.multiple&&(null!=r.defaultValue?Gn(n,!!r.multiple,r.defaultValue,!0):Gn(n,!!r.multiple,r.multiple?[]:"",!1))}}}break;case 
8:null===t.stateNode&&a("162"),t.stateNode.nodeValue=t.memoizedProps;break;case 5:case 15:case 16:break;default:a("163")}}function di(e,t,n){(n=Xr(n)).tag=3,n.payload={element:null};var r=t.value;return n.callback=function(){pa(r),ii(e,t)},n}function pi(e,t,n){(n=Xr(n)).tag=3;var r=e.stateNode;return null!==r&&"function"==typeof r.componentDidCatch&&(n.callback=function(){null===Ti?Ti=new Set([this]):Ti.add(this);var n=t.value,r=t.stack;ii(e,t),this.componentDidCatch(n,{componentStack:null!==r?r:""})}),n}function hi(e){switch(e.tag){case 2:kr(e.type)&&Rr();var t=e.effectTag;return 1024&t?(e.effectTag=-1025&t|64,e):null;case 3:return kr(e.type._reactResult)&&Rr(),1024&(t=e.effectTag)?(e.effectTag=-1025&t|64,e):null;case 5:return bo(),jr(),0!=(64&(t=e.effectTag))&&a("285"),e.effectTag=-1025&t|64,e;case 7:return xo(e),null;case 16:return 1024&(t=e.effectTag)?(e.effectTag=-1025&t|64,e):null;case 6:return bo(),null;case 12:return lo(e),null;default:return null}}ni=function(){},ri=function(e,t,n,r,i){var a=e.memoizedProps;if(a!==r){var u=t.stateNode;switch(go(mo.current),e=null,n){case"input":a=_t(u,a),r=_t(u,r),e=[];break;case"option":a=zn(u,a),r=zn(u,r),e=[];break;case"select":a=o({},a,{value:void 0}),r=o({},r,{value:void 0}),e=[];break;case"textarea":a=Vn(u,a),r=Vn(u,r),e=[];break;default:"function"!=typeof a.onClick&&"function"==typeof r.onClick&&(u.onclick=fr)}sr(n,r),u=n=void 0;var s=null;for(n in a)if(!r.hasOwnProperty(n)&&a.hasOwnProperty(n)&&null!=a[n])if("style"===n){var c=a[n];for(u in c)c.hasOwnProperty(u)&&(s||(s={}),s[u]="")}else"dangerouslySetInnerHTML"!==n&&"children"!==n&&"suppressContentEditableWarning"!==n&&"suppressHydrationWarning"!==n&&"autoFocus"!==n&&(_.hasOwnProperty(n)?e||(e=[]):(e=e||[]).push(n,null));for(n in r){var l=r[n];if(c=null!=a?a[n]:void 0,r.hasOwnProperty(n)&&l!==c&&(null!=l||null!=c))if("style"===n)if(c){for(u in c)!c.hasOwnProperty(u)||l&&l.hasOwnProperty(u)||(s||(s={}),s[u]="");for(u in l)l.hasOwnProperty(u)&&c[u]!==l[u]&&(s||(s={}),s[u]=l[u])}else s||(e||(e=[]),e.push(n,s)),s=l;else"dangerouslySetInnerHTML"===n?(l=l?l.__html:void 0,c=c?c.__html:void 0,null!=l&&c!==l&&(e=e||[]).push(n,""+l)):"children"===n?c===l||"string"!=typeof l&&"number"!=typeof l||(e=e||[]).push(n,""+l):"suppressContentEditableWarning"!==n&&"suppressHydrationWarning"!==n&&(_.hasOwnProperty(n)?(null!=l&&lr(i,n),e||c===l||(e=[])):(e=e||[]).push(n,l))}s&&(e=e||[]).push("style",s),i=e,(t.updateQueue=i)&&ti(t)}},oi=function(e,t,n,r){n!==r&&ti(t)};var mi={readContext:po},vi=ze.ReactCurrentOwner,yi=0,gi=0,_i=!1,bi=null,wi=null,xi=0,Ei=!1,Oi=null,Si=!1,Ti=null;function ki(){if(null!==bi)for(var e=bi.return;null!==e;){var t=e;switch(t.tag){case 2:var n=t.type.childContextTypes;null!==n&&void 0!==n&&Rr();break;case 3:null!==(n=t.type._reactResult.childContextTypes)&&void 0!==n&&Rr();break;case 5:bo(),jr();break;case 7:xo(t);break;case 6:bo();break;case 12:lo(t)}e=e.return}wi=null,xi=0,Ei=!1,bi=null}function Ri(e){for(;;){var t=e.alternate,n=e.return,r=e.sibling;if(0==(512&e.effectTag)){var i=t,u=(t=e).pendingProps;switch(t.tag){case 0:case 1:break;case 2:kr(t.type)&&Rr();break;case 3:kr(t.type._reactResult)&&Rr();break;case 5:bo(),jr(),(u=t.stateNode).pendingContext&&(u.context=u.pendingContext,u.pendingContext=null),null!==i&&null!==i.child||(Yo(t),t.effectTag&=-3),ni(t);break;case 7:xo(t);var s=go(yo.current),c=t.type;if(null!==i&&null!=t.stateNode)ri(i,t,c,u,s),i.ref!==t.ref&&(t.effectTag|=128);else if(u){var l=go(mo.current);if(Yo(t)){i=(u=t).stateNode;var 
f=u.type,d=u.memoizedProps,p=s;switch(i[D]=u,i[N]=d,c=void 0,s=f){case"iframe":case"object":Sn("load",i);break;case"video":case"audio":for(f=0;f<re.length;f++)Sn(re[f],i);break;case"source":Sn("error",i);break;case"img":case"image":case"link":Sn("error",i),Sn("load",i);break;case"form":Sn("reset",i),Sn("submit",i);break;case"details":Sn("toggle",i);break;case"input":bt(i,d),Sn("invalid",i),lr(p,"onChange");break;case"select":i._wrapperState={wasMultiple:!!d.multiple},Sn("invalid",i),lr(p,"onChange");break;case"textarea":Xn(i,d),Sn("invalid",i),lr(p,"onChange")}for(c in sr(s,d),f=null,d)d.hasOwnProperty(c)&&(l=d[c],"children"===c?"string"==typeof l?i.textContent!==l&&(f=["children",l]):"number"==typeof l&&i.textContent!==""+l&&(f=["children",""+l]):_.hasOwnProperty(c)&&null!=l&&lr(p,c));switch(s){case"input":qe(i),Et(i,d,!0);break;case"textarea":qe(i),Qn(i);break;case"select":case"option":break;default:"function"==typeof d.onClick&&(i.onclick=fr)}c=f,u.updateQueue=c,(u=null!==c)&&ti(t)}else{d=t,i=c,p=u,f=9===s.nodeType?s:s.ownerDocument,l===Jn.html&&(l=Zn(i)),l===Jn.html?"script"===i?((i=f.createElement("div")).innerHTML="<script><\/script>",f=i.removeChild(i.firstChild)):"string"==typeof p.is?f=f.createElement(i,{is:p.is}):(f=f.createElement(i),"select"===i&&p.multiple&&(f.multiple=!0)):f=f.createElementNS(l,i),(i=f)[D]=d,i[N]=u;e:for(d=i,p=t,f=p.child;null!==f;){if(7===f.tag||8===f.tag)d.appendChild(f.stateNode);else if(6!==f.tag&&null!==f.child){f.child.return=f,f=f.child;continue}if(f===p)break;for(;null===f.sibling;){if(null===f.return||f.return===p)break e;f=f.return}f.sibling.return=f.return,f=f.sibling}p=i;var h=s,m=cr(f=c,d=u);switch(f){case"iframe":case"object":Sn("load",p),s=d;break;case"video":case"audio":for(s=0;s<re.length;s++)Sn(re[s],p);s=d;break;case"source":Sn("error",p),s=d;break;case"img":case"image":case"link":Sn("error",p),Sn("load",p),s=d;break;case"form":Sn("reset",p),Sn("submit",p),s=d;break;case"details":Sn("toggle",p),s=d;break;case"input":bt(p,d),s=_t(p,d),Sn("invalid",p),lr(h,"onChange");break;case"option":s=zn(p,d);break;case"select":p._wrapperState={wasMultiple:!!d.multiple},s=o({},d,{value:void 0}),Sn("invalid",p),lr(h,"onChange");break;case"textarea":Xn(p,d),s=Vn(p,d),Sn("invalid",p),lr(h,"onChange");break;default:s=d}sr(f,s),l=void 0;var v=f,y=p,g=s;for(l in g)if(g.hasOwnProperty(l)){var b=g[l];"style"===l?ar(y,b):"dangerouslySetInnerHTML"===l?null!=(b=b?b.__html:void 0)&&nr(y,b):"children"===l?"string"==typeof b?("textarea"!==v||""!==b)&&rr(y,b):"number"==typeof b&&rr(y,""+b):"suppressContentEditableWarning"!==l&&"suppressHydrationWarning"!==l&&"autoFocus"!==l&&(_.hasOwnProperty(l)?null!=b&&lr(h,l):null!=b&&yt(y,l,b,m))}switch(f){case"input":qe(p),Et(p,d,!1);break;case"textarea":qe(p),Qn(p);break;case"option":null!=d.value&&p.setAttribute("value",""+gt(d.value));break;case"select":(s=p).multiple=!!d.multiple,null!=(p=d.value)?Gn(s,!!d.multiple,p,!1):null!=d.defaultValue&&Gn(s,!!d.multiple,d.defaultValue,!0);break;default:"function"==typeof s.onClick&&(p.onclick=fr)}(u=hr(c,u))&&ti(t),t.stateNode=i}null!==t.ref&&(t.effectTag|=128)}else null===t.stateNode&&a("166");break;case 8:i&&null!=t.stateNode?oi(i,t,i.memoizedProps,u):("string"!=typeof u&&(null===t.stateNode&&a("166")),i=go(yo.current),go(mo.current),Yo(t)?(c=(u=t).stateNode,i=u.memoizedProps,c[D]=u,(u=c.nodeValue!==i)&&ti(t)):(c=t,(u=(9===i.nodeType?i:i.ownerDocument).createTextNode(u))[D]=c,t.stateNode=u));break;case 13:case 14:case 16:case 9:case 10:case 15:break;case 6:bo(),ni(t);break;case 
12:lo(t);break;case 11:break;case 4:a("167");default:a("156")}if(t=bi=null,u=e,1073741823===xi||1073741823!==u.childExpirationTime){for(c=0,i=u.child;null!==i;)s=i.expirationTime,d=i.childExpirationTime,(0===c||0!==s&&s<c)&&(c=s),(0===c||0!==d&&d<c)&&(c=d),i=i.sibling;u.childExpirationTime=c}if(null!==t)return t;null!==n&&0==(512&n.effectTag)&&(null===n.firstEffect&&(n.firstEffect=e.firstEffect),null!==e.lastEffect&&(null!==n.lastEffect&&(n.lastEffect.nextEffect=e.firstEffect),n.lastEffect=e.lastEffect),1<e.effectTag&&(null!==n.lastEffect?n.lastEffect.nextEffect=e:n.firstEffect=e,n.lastEffect=e))}else{if(null!==(e=hi(e)))return e.effectTag&=511,e;null!==n&&(n.firstEffect=n.lastEffect=null,n.effectTag|=512)}if(null!==r)return r;if(null===n)break;e=n}return null}function ji(e){var t=ei(e.alternate,e,xi);return null===t&&(t=Ri(e)),vi.current=null,t}function Pi(e,t,n){_i&&a("243"),_i=!0,vi.currentDispatcher=mi;var r=e.nextExpirationTimeToWorkOn;r===xi&&e===wi&&null!==bi||(ki(),xi=r,bi=Fr((wi=e).current,null,xi),e.pendingCommitExpirationTime=0);for(var o=!1;;){try{if(t)for(;null!==bi&&!da();)bi=ji(bi);else for(;null!==bi;)bi=ji(bi)}catch(e){if(null===bi)o=!0,pa(e);else{null===bi&&a("271");var i=bi,u=i.return;if(null!==u){e:{var s=u,c=i,l=e;u=xi,c.effectTag|=512,c.firstEffect=c.lastEffect=null,Ei=!0,l=oo(l,c);do{switch(s.tag){case 5:s.effectTag|=1024,s.expirationTime=u,Jr(s,u=di(s,l,u));break e;case 2:case 3:c=l;var f=s.stateNode;if(0==(64&s.effectTag)&&null!==f&&"function"==typeof f.componentDidCatch&&(null===Ti||!Ti.has(f))){s.effectTag|=1024,s.expirationTime=u,Jr(s,u=pi(s,c,u));break e}}s=s.return}while(null!==s)}bi=Ri(i);continue}o=!0,pa(e)}}break}if(_i=!1,so=uo=ao=vi.currentDispatcher=null,o)wi=null,e.finishedWork=null;else if(null!==bi)e.finishedWork=null;else{if(null===(t=e.current.alternate)&&a("281"),wi=null,Ei){if(o=e.latestPendingTime,i=e.latestSuspendedTime,u=e.latestPingedTime,0!==o&&o>r||0!==i&&i>r||0!==u&&u>r)return e.didError=!1,0!==(n=e.latestPingedTime)&&n<=r&&(e.latestPingedTime=0),n=e.earliestPendingTime,t=e.latestPendingTime,n===r?e.earliestPendingTime=t===r?e.latestPendingTime=0:t:t===r&&(e.latestPendingTime=n),n=e.earliestSuspendedTime,t=e.latestSuspendedTime,0===n?e.earliestSuspendedTime=e.latestSuspendedTime=r:n>r?e.earliestSuspendedTime=r:t<r&&(e.latestSuspendedTime=r),Hr(r,e),void(e.expirationTime=e.expirationTime);if(!e.didError&&!n)return e.didError=!0,e.nextExpirationTimeToWorkOn=r,r=e.expirationTime=1,void(e.expirationTime=r)}e.pendingCommitExpirationTime=r,e.finishedWork=t}}function Ci(e,t){var n;e:{for(_i&&!Si&&a("263"),n=e.return;null!==n;){switch(n.tag){case 2:case 3:var r=n.stateNode;if("function"==typeof n.type.getDerivedStateFromCatch||"function"==typeof r.componentDidCatch&&(null===Ti||!Ti.has(r))){Qr(n,e=pi(n,e=oo(t,e),1)),Ii(n,1),n=void 0;break e}break;case 5:Qr(n,e=di(n,e=oo(t,e),1)),Ii(n,1),n=void 0;break e}n=n.return}5===e.tag&&(Qr(e,n=di(e,n=oo(t,e),1)),Ii(e,1)),n=void 0}return n}function Mi(e,t){return 0!==gi?e=gi:_i?e=Si?1:xi:1&t.mode?(e=Xi?2+10*(1+((e-2+15)/10|0)):2+25*(1+((e-2+500)/25|0)),null!==wi&&e===xi&&(e+=1)):e=1,Xi&&(0===Yi||e>Yi)&&(Yi=e),e}function Ii(e,t){e:{(0===e.expirationTime||e.expirationTime>t)&&(e.expirationTime=t);var n=e.alternate;null!==n&&(0===n.expirationTime||n.expirationTime>t)&&(n.expirationTime=t);var 
r=e.return;if(null===r&&5===e.tag)e=e.stateNode;else{for(;null!==r;){if(n=r.alternate,(0===r.childExpirationTime||r.childExpirationTime>t)&&(r.childExpirationTime=t),null!==n&&(0===n.childExpirationTime||n.childExpirationTime>t)&&(n.childExpirationTime=t),null===r.return&&5===r.tag){e=r.stateNode;break e}r=r.return}e=null}}null!==e&&(!_i&&0!==xi&&t<xi&&ki(),qr(e,t),_i&&!Si&&wi===e||(t=e,e=e.expirationTime,null===t.nextScheduledRoot?(t.expirationTime=e,null===Ni?(Di=Ni=t,t.nextScheduledRoot=t):(Ni=Ni.nextScheduledRoot=t).nextScheduledRoot=Di):(0===(n=t.expirationTime)||e<n)&&(t.expirationTime=e),Fi||(Gi?Vi&&(Bi=t,Wi=1,la(t,1,!0)):1===e?ca(1,null):ia(t,e))),ta>ea&&(ta=0,a("185")))}function Ai(e,t,n,r,o){var i=gi;gi=1;try{return e(t,n,r,o)}finally{gi=i}}var Di=null,Ni=null,Li=0,Ui=void 0,Fi=!1,Bi=null,Wi=0,Yi=0,$i=!1,qi=!1,Hi=null,zi=null,Gi=!1,Vi=!1,Xi=!1,Ki=null,Qi=i.unstable_now(),Ji=2+(Qi/10|0),Zi=Ji,ea=50,ta=0,na=null,ra=1;function oa(){Ji=2+((i.unstable_now()-Qi)/10|0)}function ia(e,t){if(0!==Li){if(t>Li)return;null!==Ui&&i.unstable_cancelScheduledWork(Ui)}Li=t,e=i.unstable_now()-Qi,Ui=i.unstable_scheduleWork(sa,{timeout:10*(t-2)-e})}function aa(){return Fi?Zi:(ua(),0!==Wi&&1073741823!==Wi||(oa(),Zi=Ji),Zi)}function ua(){var e=0,t=null;if(null!==Ni)for(var n=Ni,r=Di;null!==r;){var o=r.expirationTime;if(0===o){if((null===n||null===Ni)&&a("244"),r===r.nextScheduledRoot){Di=Ni=r.nextScheduledRoot=null;break}if(r===Di)Di=o=r.nextScheduledRoot,Ni.nextScheduledRoot=o,r.nextScheduledRoot=null;else{if(r===Ni){(Ni=n).nextScheduledRoot=Di,r.nextScheduledRoot=null;break}n.nextScheduledRoot=r.nextScheduledRoot,r.nextScheduledRoot=null}r=n.nextScheduledRoot}else{if((0===e||o<e)&&(e=o,t=r),r===Ni)break;if(1===e)break;n=r,r=r.nextScheduledRoot}}Bi=t,Wi=e}function sa(e){if(e.didTimeout&&null!==Di){oa();var t=Di;do{var n=t.expirationTime;0!==n&&Ji>=n&&(t.nextExpirationTimeToWorkOn=Ji),t=t.nextScheduledRoot}while(t!==Di)}ca(0,e)}function ca(e,t){if(zi=t,ua(),null!==zi)for(oa(),Zi=Ji;null!==Bi&&0!==Wi&&(0===e||e>=Wi)&&(!$i||Ji>=Wi);)la(Bi,Wi,Ji>=Wi),ua(),oa(),Zi=Ji;else for(;null!==Bi&&0!==Wi&&(0===e||e>=Wi);)la(Bi,Wi,!0),ua();if(null!==zi&&(Li=0,Ui=null),0!==Wi&&ia(Bi,Wi),zi=null,$i=!1,ta=0,na=null,null!==Ki)for(e=Ki,Ki=null,t=0;t<e.length;t++){var n=e[t];try{n._onComplete()}catch(e){qi||(qi=!0,Hi=e)}}if(qi)throw e=Hi,Hi=null,qi=!1,e}function la(e,t,n){if(Fi&&a("245"),Fi=!0,null===zi||n){var r=e.finishedWork;null!==r?fa(e,r,t):(e.finishedWork=null,Pi(e,!1,n),null!==(r=e.finishedWork)&&fa(e,r,t))}else null!==(r=e.finishedWork)?fa(e,r,t):(e.finishedWork=null,Pi(e,!0,n),null!==(r=e.finishedWork)&&(da()?e.finishedWork=r:fa(e,r,t)));Fi=!1}function fa(e,t,n){var r=e.firstBatch;if(null!==r&&r._expirationTime<=n&&(null===Ki?Ki=[r]:Ki.push(r),r._defer))return e.finishedWork=t,void(e.expirationTime=0);e.finishedWork=null,e===na?ta++:(na=e,ta=0),Si=_i=!0,e.current===t&&a("177"),0===(n=e.pendingCommitExpirationTime)&&a("261"),e.pendingCommitExpirationTime=0,r=t.expirationTime;var 
o=t.childExpirationTime;if(r=0===r||0!==o&&o<r?o:r,e.didError=!1,0===r?(e.earliestPendingTime=0,e.latestPendingTime=0,e.earliestSuspendedTime=0,e.latestSuspendedTime=0,e.latestPingedTime=0):(0!==(o=e.latestPendingTime)&&(o<r?e.earliestPendingTime=e.latestPendingTime=0:e.earliestPendingTime<r&&(e.earliestPendingTime=e.latestPendingTime)),0===(o=e.earliestSuspendedTime)?qr(e,r):r>e.latestSuspendedTime?(e.earliestSuspendedTime=0,e.latestSuspendedTime=0,e.latestPingedTime=0,qr(e,r)):r<o&&qr(e,r)),Hr(0,e),vi.current=null,1<t.effectTag?null!==t.lastEffect?(t.lastEffect.nextEffect=t,r=t.firstEffect):r=t:r=t.firstEffect,dr=On,Ln(o=Nn())){if("selectionStart"in o)var i={start:o.selectionStart,end:o.selectionEnd};else e:{var u=(i=(i=o.ownerDocument)&&i.defaultView||window).getSelection&&i.getSelection();if(u&&0!==u.rangeCount){i=u.anchorNode;var s=u.anchorOffset,c=u.focusNode;u=u.focusOffset;try{i.nodeType,c.nodeType}catch(e){i=null;break e}var l=0,f=-1,d=-1,p=0,h=0,m=o,v=null;t:for(;;){for(var y;m!==i||0!==s&&3!==m.nodeType||(f=l+s),m!==c||0!==u&&3!==m.nodeType||(d=l+u),3===m.nodeType&&(l+=m.nodeValue.length),null!==(y=m.firstChild);)v=m,m=y;for(;;){if(m===o)break t;if(v===i&&++p===s&&(f=l),v===c&&++h===u&&(d=l),null!==(y=m.nextSibling))break;v=(m=v).parentNode}m=y}i=-1===f||-1===d?null:{start:f,end:d}}else i=null}i=i||{start:0,end:0}}else i=null;for(pr={focusedElem:o,selectionRange:i},On=!1,Oi=r;null!==Oi;){o=!1,i=void 0;try{for(;null!==Oi;){if(256&Oi.effectTag){var g=Oi.alternate;e:switch(s=Oi,s.tag){case 2:case 3:if(256&s.effectTag&&null!==g){var _=g.memoizedProps,b=g.memoizedState,w=s.stateNode;w.props=s.memoizedProps,w.state=s.memoizedState;var x=w.getSnapshotBeforeUpdate(_,b);w.__reactInternalSnapshotBeforeUpdate=x}break e;case 5:case 7:case 8:case 6:break e;default:a("163")}}Oi=Oi.nextEffect}}catch(e){o=!0,i=e}o&&(null===Oi&&a("178"),Ci(Oi,i),null!==Oi&&(Oi=Oi.nextEffect))}for(Oi=r;null!==Oi;){g=!1,_=void 0;try{for(;null!==Oi;){var E=Oi.effectTag;if(16&E&&rr(Oi.stateNode,""),128&E){var O=Oi.alternate;if(null!==O){var S=O.ref;null!==S&&("function"==typeof S?S(null):S.current=null)}}switch(14&E){case 2:ci(Oi),Oi.effectTag&=-3;break;case 6:ci(Oi),Oi.effectTag&=-3,fi(Oi.alternate,Oi);break;case 4:fi(Oi.alternate,Oi);break;case 8:li(b=Oi),b.return=null,b.child=null,b.alternate&&(b.alternate.child=null,b.alternate.return=null)}Oi=Oi.nextEffect}}catch(e){g=!0,_=e}g&&(null===Oi&&a("178"),Ci(Oi,_),null!==Oi&&(Oi=Oi.nextEffect))}if(S=pr,O=Nn(),E=S.focusedElem,_=S.selectionRange,O!==E&&E&&E.ownerDocument&&function e(t,n){return!(!t||!n)&&(t===n||(!t||3!==t.nodeType)&&(n&&3===n.nodeType?e(t,n.parentNode):"contains"in t?t.contains(n):!!t.compareDocumentPosition&&!!(16&t.compareDocumentPosition(n))))}(E.ownerDocument.documentElement,E)){null!==_&&Ln(E)&&(O=_.start,void 0===(S=_.end)&&(S=O),"selectionStart"in E?(E.selectionStart=O,E.selectionEnd=Math.min(S,E.value.length)):(O=((g=E.ownerDocument||document)&&g.defaultView||window).getSelection(),b=E.textContent.length,S=Math.min(_.start,b),_=void 0===_.end?S:Math.min(_.end,b),!O.extend&&S>_&&(b=_,_=S,S=b),b=Dn(E,S),w=Dn(E,_),b&&w&&(1!==O.rangeCount||O.anchorNode!==b.node||O.anchorOffset!==b.offset||O.focusNode!==w.node||O.focusOffset!==w.offset)&&((g=g.createRange()).setStart(b.node,b.offset),O.removeAllRanges(),S>_?(O.addRange(g),O.extend(w.node,w.offset)):(g.setEnd(w.node,w.offset),O.addRange(g))))),O=[];for(S=E;S=S.parentNode;)1===S.nodeType&&O.push({element:S,left:S.scrollLeft,top:S.scrollTop});for("function"==typeof 
E.focus&&E.focus(),E=0;E<O.length;E++)(S=O[E]).element.scrollLeft=S.left,S.element.scrollTop=S.top}for(pr=null,On=!!dr,dr=null,e.current=t,Oi=r;null!==Oi;){r=!1,E=void 0;try{for(O=n;null!==Oi;){var T=Oi.effectTag;if(36&T){var k=Oi.alternate;switch(g=O,(S=Oi).tag){case 2:case 3:var R=S.stateNode;if(4&S.effectTag)if(null===k)R.props=S.memoizedProps,R.state=S.memoizedState,R.componentDidMount();else{var j=k.memoizedProps,P=k.memoizedState;R.props=S.memoizedProps,R.state=S.memoizedState,R.componentDidUpdate(j,P,R.__reactInternalSnapshotBeforeUpdate)}var C=S.updateQueue;null!==C&&(R.props=S.memoizedProps,R.state=S.memoizedState,no(0,C,R));break;case 5:var M=S.updateQueue;if(null!==M){if(_=null,null!==S.child)switch(S.child.tag){case 7:_=S.child.stateNode;break;case 2:case 3:_=S.child.stateNode}no(0,M,_)}break;case 7:var I=S.stateNode;null===k&&4&S.effectTag&&hr(S.type,S.memoizedProps)&&I.focus();break;case 8:case 6:case 15:case 16:break;default:a("163")}}if(128&T){var A=Oi.ref;if(null!==A){var D=Oi.stateNode;switch(Oi.tag){case 7:var N=D;break;default:N=D}"function"==typeof A?A(N):A.current=N}}var L=Oi.nextEffect;Oi.nextEffect=null,Oi=L}}catch(e){r=!0,E=e}r&&(null===Oi&&a("178"),Ci(Oi,E),null!==Oi&&(Oi=Oi.nextEffect))}_i=Si=!1,"function"==typeof Ar&&Ar(t.stateNode),T=t.expirationTime,t=t.childExpirationTime,0===(t=0===T||0!==t&&t<T?t:T)&&(Ti=null),e.expirationTime=t,e.finishedWork=null}function da(){return!!$i||!(null===zi||zi.timeRemaining()>ra)&&($i=!0)}function pa(e){null===Bi&&a("246"),Bi.expirationTime=0,qi||(qi=!0,Hi=e)}function ha(e,t){var n=Gi;Gi=!0;try{return e(t)}finally{(Gi=n)||Fi||ca(1,null)}}function ma(e,t){if(Gi&&!Vi){Vi=!0;try{return e(t)}finally{Vi=!1}}return e(t)}function va(e,t,n){if(Xi)return e(t,n);Gi||Fi||0===Yi||(ca(Yi,null),Yi=0);var r=Xi,o=Gi;Gi=Xi=!0;try{return e(t,n)}finally{Xi=r,(Gi=o)||Fi||ca(1,null)}}function ya(e,t,n,r,o){var i=t.current;return n=function(e){if(!e)return xr;e=e._reactInternalFiber;e:{(2!==tn(e)||2!==e.tag&&3!==e.tag)&&a("170");var t=e;do{switch(t.tag){case 5:t=t.stateNode.context;break e;case 2:if(kr(t.type)){t=t.stateNode.__reactInternalMemoizedMergedChildContext;break e}break;case 3:if(kr(t.type._reactResult)){t=t.stateNode.__reactInternalMemoizedMergedChildContext;break e}}t=t.return}while(null!==t);a("171"),t=void 0}if(2===e.tag){var n=e.type;if(kr(n))return Cr(e,n,t)}else if(3===e.tag&&kr(n=e.type._reactResult))return Cr(e,n,t);return t}(n),null===t.context?t.context=n:t.pendingContext=n,t=o,(o=Xr(r)).payload={element:e},null!==(t=void 0===t?null:t)&&(o.callback=t),Qr(i,o),Ii(i,r),r}function ga(e,t,n,r){var o=t.current;return ya(e,t,n,o=Mi(aa(),o),r)}function _a(e){if(!(e=e.current).child)return null;switch(e.child.tag){case 7:default:return e.child.stateNode}}function ba(e){var t=2+25*(1+((aa()-2+500)/25|0));t<=yi&&(t=yi+1),this._expirationTime=yi=t,this._root=e,this._callbacks=this._next=null,this._hasChildren=this._didComplete=!1,this._children=null,this._defer=!0}function wa(){this._callbacks=null,this._didCommit=!1,this._onCommit=this._onCommit.bind(this)}function xa(e,t,n){e={current:t=new Lr(5,null,null,t?3:0),containerInfo:e,pendingChildren:null,earliestPendingTime:0,latestPendingTime:0,earliestSuspendedTime:0,latestSuspendedTime:0,latestPingedTime:0,didError:!1,pendingCommitExpirationTime:0,finishedWork:null,timeoutHandle:-1,context:null,pendingContext:null,hydrate:n,nextExpirationTimeToWorkOn:0,expirationTime:0,firstBatch:null,nextScheduledRoot:null},this._internalRoot=t.stateNode=e}function 
Ea(e){return!(!e||1!==e.nodeType&&9!==e.nodeType&&11!==e.nodeType&&(8!==e.nodeType||" react-mount-point-unstable "!==e.nodeValue))}function Oa(e,t,n,r,o){Ea(n)||a("200");var i=n._reactRootContainer;if(i){if("function"==typeof o){var u=o;o=function(){var e=_a(i._internalRoot);u.call(e)}}null!=e?i.legacy_renderSubtreeIntoContainer(e,t,o):i.render(t,o)}else{if(i=n._reactRootContainer=function(e,t){if(t||(t=!(!(t=e?9===e.nodeType?e.documentElement:e.firstChild:null)||1!==t.nodeType||!t.hasAttribute("data-reactroot"))),!t)for(var n;n=e.lastChild;)e.removeChild(n);return new xa(e,!1,t)}(n,r),"function"==typeof o){var s=o;o=function(){var e=_a(i._internalRoot);s.call(e)}}ma(function(){null!=e?i.legacy_renderSubtreeIntoContainer(e,t,o):i.render(t,o)})}return _a(i._internalRoot)}function Sa(e,t){var n=2<arguments.length&&void 0!==arguments[2]?arguments[2]:null;return Ea(t)||a("200"),function(e,t,n){var r=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return{$$typeof:Ke,key:null==r?null:""+r,children:e,containerInfo:t,implementation:n}}(e,t,null,n)}Re=function(e,t,n){switch(t){case"input":if(xt(e,n),t=n.name,"radio"===n.type&&null!=t){for(n=e;n.parentNode;)n=n.parentNode;for(n=n.querySelectorAll("input[name="+JSON.stringify(""+t)+'][type="radio"]'),t=0;t<n.length;t++){var r=n[t];if(r!==e&&r.form===e.form){var o=B(r);o||a("90"),He(r),xt(r,o)}}}break;case"textarea":Kn(e,n);break;case"select":null!=(t=n.value)&&Gn(e,!!n.multiple,t,!1)}},ba.prototype.render=function(e){this._defer||a("250"),this._hasChildren=!0,this._children=e;var t=this._root._internalRoot,n=this._expirationTime,r=new wa;return ya(e,t,null,n,r._onCommit),r},ba.prototype.then=function(e){if(this._didComplete)e();else{var t=this._callbacks;null===t&&(t=this._callbacks=[]),t.push(e)}},ba.prototype.commit=function(){var e=this._root._internalRoot,t=e.firstBatch;if(this._defer&&null!==t||a("251"),this._hasChildren){var n=this._expirationTime;if(t!==this){this._hasChildren&&(n=this._expirationTime=t._expirationTime,this.render(this._children));for(var r=null,o=t;o!==this;)r=o,o=o._next;null===r&&a("251"),r._next=o._next,this._next=t,e.firstBatch=this}this._defer=!1,t=n,Fi&&a("253"),Bi=e,Wi=t,la(e,t,!0),ca(1,null),t=this._next,this._next=null,null!==(t=e.firstBatch=t)&&t._hasChildren&&t.render(t._children)}else this._next=null,this._defer=!1},ba.prototype._onComplete=function(){if(!this._didComplete){this._didComplete=!0;var e=this._callbacks;if(null!==e)for(var t=0;t<e.length;t++)(0,e[t])()}},wa.prototype.then=function(e){if(this._didCommit)e();else{var t=this._callbacks;null===t&&(t=this._callbacks=[]),t.push(e)}},wa.prototype._onCommit=function(){if(!this._didCommit){this._didCommit=!0;var e=this._callbacks;if(null!==e)for(var t=0;t<e.length;t++){var n=e[t];"function"!=typeof n&&a("191",n),n()}}},xa.prototype.render=function(e,t){var n=this._internalRoot,r=new wa;return null!==(t=void 0===t?null:t)&&r.then(t),ga(e,n,null,r._onCommit),r},xa.prototype.unmount=function(e){var t=this._internalRoot,n=new wa;return null!==(e=void 0===e?null:e)&&n.then(e),ga(null,t,null,n._onCommit),n},xa.prototype.legacy_renderSubtreeIntoContainer=function(e,t,n){var r=this._internalRoot,o=new wa;return null!==(n=void 0===n?null:n)&&o.then(n),ga(t,r,e,o._onCommit),o},xa.prototype.createBatch=function(){var e=new ba(this),t=e._expirationTime,n=this._internalRoot,r=n.firstBatch;if(null===r)n.firstBatch=e,e._next=null;else{for(n=null;null!==r&&r._expirationTime<=t;)n=r,r=r._next;e._next=r,null!==n&&(n._next=e)}return 
e},Ae=ha,De=va,Ne=function(){Fi||0===Yi||(ca(Yi,null),Yi=0)};var Ta={createPortal:Sa,findDOMNode:function(e){if(null==e)return null;if(1===e.nodeType)return e;var t=e._reactInternalFiber;return void 0===t&&("function"==typeof e.render?a("188"):a("268",Object.keys(e))),e=null===(e=rn(t))?null:e.stateNode},hydrate:function(e,t,n){return Oa(null,e,t,!0,n)},render:function(e,t,n){return Oa(null,e,t,!1,n)},unstable_renderSubtreeIntoContainer:function(e,t,n,r){return(null==e||void 0===e._reactInternalFiber)&&a("38"),Oa(e,t,n,!1,r)},unmountComponentAtNode:function(e){return Ea(e)||a("40"),!!e._reactRootContainer&&(ma(function(){Oa(null,null,e,!1,function(){e._reactRootContainer=null})}),!0)},unstable_createPortal:function(){return Sa.apply(void 0,arguments)},unstable_batchedUpdates:ha,unstable_interactiveUpdates:va,flushSync:function(e,t){Fi&&a("187");var n=Gi;Gi=!0;try{return Ai(e,t)}finally{Gi=n,ca(1,null)}},unstable_flushControlled:function(e){var t=Gi;Gi=!0;try{Ai(e)}finally{(Gi=t)||Fi||ca(1,null)}},__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{Events:[U,F,B,C.injectEventPluginsByName,g,z,function(e){T(e,H)},Me,Ie,Rn,I]},unstable_createRoot:function(e,t){return Ea(e)||a("278"),new xa(e,!0,null!=t&&!0===t.hydrate)}};!function(e){var t=e.findFiberByHostInstance;(function(e){if("undefined"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__)return!1;var t=__REACT_DEVTOOLS_GLOBAL_HOOK__;if(t.isDisabled||!t.supportsFiber)return!0;try{var n=t.inject(e);Ar=Nr(function(e){return t.onCommitFiberRoot(n,e)}),Dr=Nr(function(e){return t.onCommitFiberUnmount(n,e)})}catch(e){}})(o({},e,{findHostInstanceByFiber:function(e){return null===(e=rn(e))?null:e.stateNode},findFiberByHostInstance:function(e){return t?t(e):null}}))}({findFiberByHostInstance:L,bundleType:0,version:"16.5.2",rendererPackageName:"react-dom"});var ka={default:Ta},Ra=ka&&Ta||ka;e.exports=Ra.default||Ra},function(e,t,n){"use strict";e.exports=n(217)},function(e,t,n){"use strict"; -/** @license React v16.5.2 - * schedule.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */Object.defineProperty(t,"__esModule",{value:!0});var r=null,o=!1,i=!1,a="object"==typeof performance&&"function"==typeof performance.now,u={timeRemaining:a?function(){var e=m()-performance.now();return 0<e?e:0}:function(){var e=m()-Date.now();return 0<e?e:0},didTimeout:!1};function s(){if(!o){var e=r.timesOutAt;i?h():i=!0,p(l,e)}}function c(){var e=r,t=r.next;if(r===t)r=null;else{var n=r.previous;r=n.next=t,t.previous=n}e.next=e.previous=null,(e=e.callback)(u)}function l(e){o=!0,u.didTimeout=e;try{if(e)for(;null!==r;){var n=t.unstable_now();if(!(r.timesOutAt<=n))break;do{c()}while(null!==r&&r.timesOutAt<=n)}else if(null!==r)do{c()}while(null!==r&&0<m()-t.unstable_now())}finally{o=!1,null!==r?s():i=!1}}var f,d,p,h,m,v=Date,y="function"==typeof setTimeout?setTimeout:void 0,g="function"==typeof clearTimeout?clearTimeout:void 0,_="function"==typeof requestAnimationFrame?requestAnimationFrame:void 0,b="function"==typeof cancelAnimationFrame?cancelAnimationFrame:void 0;function w(e){f=_(function(t){g(d),e(t)}),d=y(function(){b(f),e(t.unstable_now())},100)}if(a){var x=performance;t.unstable_now=function(){return x.now()}}else t.unstable_now=function(){return v.now()};if("undefined"==typeof window){var E=-1;p=function(e){E=setTimeout(e,0,!0)},h=function(){clearTimeout(E)},m=function(){return 0}}else if(window._schedMock){var O=window._schedMock;p=O[0],h=O[1],m=O[2]}else{"undefined"!=typeof console&&("function"!=typeof _&&console.error("This browser doesn't support requestAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills"),"function"!=typeof b&&console.error("This browser doesn't support cancelAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills"));var S=null,T=!1,k=-1,R=!1,j=!1,P=0,C=33,M=33;m=function(){return P};var I="__reactIdleCallback$"+Math.random().toString(36).slice(2);window.addEventListener("message",function(e){if(e.source===window&&e.data===I){T=!1;var n=t.unstable_now();if(e=!1,0>=P-n){if(!(-1!==k&&k<=n))return void(R||(R=!0,w(A)));e=!0}if(k=-1,n=S,S=null,null!==n){j=!0;try{n(e)}finally{j=!1}}}},!1);var A=function(e){R=!1;var t=e-P+M;t<M&&C<M?(8>t&&(t=8),M=t<C?C:t):C=t,P=e+M,T||(T=!0,window.postMessage(I,"*"))};p=function(e,t){S=e,k=t,j?window.postMessage(I,"*"):R||(R=!0,w(A))},h=function(){S=null,T=!1,k=-1}}t.unstable_scheduleWork=function(e,n){var o=t.unstable_now();if(e={callback:e,timesOutAt:n=void 0!==n&&null!==n&&null!==n.timeout&&void 0!==n.timeout?o+n.timeout:o+5e3,next:null,previous:null},null===r)r=e.next=e.previous=e,s();else{o=null;var i=r;do{if(i.timesOutAt>n){o=i;break}i=i.next}while(i!==r);null===o?o=r:o===r&&(r=e,s()),(n=o.previous).next=o.previous=e,e.next=o,e.previous=n}return e},t.unstable_cancelScheduledWork=function(e){var t=e.next;if(null!==t){if(t===e)r=null;else{e===r&&(r=t);var n=e.previous;n.next=t,t.previous=n}e.next=e.previous=null}}},function(module,exports,__webpack_require__){"use strict";var evalAllowed=!1;try{eval("evalAllowed = true")}catch(e){}var platformSupported=!!Object.setPrototypeOf&&evalAllowed;module.exports=__webpack_require__(219)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(e){return e&&"object"==typeof e&&"default"in e?e.default:e}(n(1)),o=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")},i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof 
t?e:t},a=function(e){function t(){return o(this,t),i(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.render=function(){return r.Children.only(this.props.children)},t}(r.Component);t.AppContainer=a,t.hot=function(){return function(e){return e}},t.areComponentsEqual=function(e,t){return e===t},t.setConfig=function(){},t.cold=function(e){return e}},function(e,t,n){"use strict";t.a=u;var r=n(1),o=(n.n(r),n(12)),i=n.n(o),a=n(116);n(72);function u(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"store",n=arguments[1]||t+"Subscription",o=function(e){function o(n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.call(this,n,r));return i[t]=n.store,i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,e),o.prototype.getChildContext=function(){var e;return(e={})[t]=this[t],e[n]=null,e},o.prototype.render=function(){return r.Children.only(this.props.children)},o}(r.Component);return o.propTypes={store:a.a.isRequired,children:i.a.element.isRequired},o.childContextTypes=((e={})[t]=a.a.isRequired,e[n]=a.b,e),o}t.b=u()},function(e,t,n){"use strict";var r=n(222);function o(){}e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var u=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw u.name="Invariant Violation",u}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t};return n.checkPropTypes=o,n.PropTypes=n,n}},function(e,t,n){"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},function(e,t,n){"use strict";n.d(t,"a",function(){return i});var r=null,o={notify:function(){}};var i=function(){function e(t,n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.store=t,this.parentSub=n,this.onStateChange=r,this.unsubscribe=null,this.listeners=o}return e.prototype.addNestedSub=function(e){return this.trySubscribe(),this.listeners.subscribe(e)},e.prototype.notifyNestedSubs=function(){this.listeners.notify()},e.prototype.isSubscribed=function(){return Boolean(this.unsubscribe)},e.prototype.trySubscribe=function(){this.unsubscribe||(this.unsubscribe=this.parentSub?this.parentSub.addNestedSub(this.onStateChange):this.store.subscribe(this.onStateChange),this.listeners=function(){var e=[],t=[];return{clear:function(){t=r,e=r},notify:function(){for(var n=e=t,r=0;r<n.length;r++)n[r]()},get:function(){return t},subscribe:function(n){var o=!0;return t===e&&(t=e.slice()),t.push(n),function(){o&&e!==r&&(o=!1,t===e&&(t=e.slice()),t.splice(t.indexOf(n),1))}}}}())},e.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null,this.listeners.clear(),this.listeners=o)},e}()},function(e,t,n){"use strict";var r=n(117),o=n(225),i=n(226),a=n(238),u=n(239),s=n(240),c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function l(e,t,n){for(var r=t.length-1;r>=0;r--){var o=t[r](e);if(o)return o}return function(t,r){throw new Error("Invalid value of type "+typeof e+" for "+n+" argument when connecting component "+r.wrappedComponentName+".")}}function f(e,t){return e===t}t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.connectHOC,n=void 0===t?r.a:t,d=e.mapStateToPropsFactories,p=void 0===d?a.a:d,h=e.mapDispatchToPropsFactories,m=void 0===h?i.a:h,v=e.mergePropsFactories,y=void 0===v?u.a:v,g=e.selectorFactory,_=void 0===g?s.a:g;return function(e,t,r){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},a=i.pure,u=void 0===a||a,s=i.areStatesEqual,d=void 0===s?f:s,h=i.areOwnPropsEqual,v=void 0===h?o.a:h,g=i.areStatePropsEqual,b=void 0===g?o.a:g,w=i.areMergedPropsEqual,x=void 0===w?o.a:w,E=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(i,["pure","areStatesEqual","areOwnPropsEqual","areStatePropsEqual","areMergedPropsEqual"]),O=l(e,p,"mapStateToProps"),S=l(t,m,"mapDispatchToProps"),T=l(r,y,"mergeProps");return n(_,c({methodName:"connect",getDisplayName:function(e){return"Connect("+e+")"},shouldHandleStateChanges:Boolean(e),initMapStateToProps:O,initMapDispatchToProps:S,initMergeProps:T,pure:u,areStatesEqual:d,areOwnPropsEqual:v,areStatePropsEqual:b,areMergedPropsEqual:x},E))}}()},function(e,t,n){"use strict";t.a=function(e,t){if(o(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),i=Object.keys(t);if(n.length!==i.length)return!1;for(var a=0;a<n.length;a++)if(!r.call(t,n[a])||!o(e[n[a]],t[n[a]]))return!1;return!0};var 
r=Object.prototype.hasOwnProperty;function o(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}},function(e,t,n){"use strict";var r=n(15),o=n(120);t.a=[function(e){return"function"==typeof e?Object(o.b)(e,"mapDispatchToProps"):void 0},function(e){return e?void 0:Object(o.a)(function(e){return{dispatch:e}})},function(e){return e&&"object"==typeof e?Object(o.a)(function(t){return Object(r.bindActionCreators)(e,t)}):void 0}]},function(e,t,n){"use strict";(function(e,r){var o,i=n(228);o="undefined"!=typeof self?self:"undefined"!=typeof window?window:void 0!==e?e:r;var a=Object(i.a)(o);t.a=a}).call(t,n(3),n(119)(e))},function(e,t,n){"use strict";t.a=function(e){var t,n=e.Symbol;"function"==typeof n?n.observable?t=n.observable:(t=n("observable"),n.observable=t):t="@@observable";return t}},function(e,t,n){"use strict";var r=n(230),o=n(235),i=n(237),a="[object Object]",u=Function.prototype,s=Object.prototype,c=u.toString,l=s.hasOwnProperty,f=c.call(Object);t.a=function(e){if(!Object(i.a)(e)||Object(r.a)(e)!=a)return!1;var t=Object(o.a)(e);if(null===t)return!0;var n=l.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&c.call(n)==f}},function(e,t,n){"use strict";var r=n(122),o=n(233),i=n(234),a="[object Null]",u="[object Undefined]",s=r.a?r.a.toStringTag:void 0;t.a=function(e){return null==e?void 0===e?u:a:s&&s in Object(e)?Object(o.a)(e):Object(i.a)(e)}},function(e,t,n){"use strict";var r=n(232),o="object"==typeof self&&self&&self.Object===Object&&self,i=r.a||o||Function("return this")();t.a=i},function(e,t,n){"use strict";(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.a=n}).call(t,n(3))},function(e,t,n){"use strict";var r=n(122),o=Object.prototype,i=o.hasOwnProperty,a=o.toString,u=r.a?r.a.toStringTag:void 0;t.a=function(e){var t=i.call(e,u),n=e[u];try{e[u]=void 0;var r=!0}catch(e){}var o=a.call(e);return r&&(t?e[u]=n:delete e[u]),o}},function(e,t,n){"use strict";var r=Object.prototype.toString;t.a=function(e){return r.call(e)}},function(e,t,n){"use strict";var r=n(236),o=Object(r.a)(Object.getPrototypeOf,Object);t.a=o},function(e,t,n){"use strict";t.a=function(e,t){return function(n){return e(t(n))}}},function(e,t,n){"use strict";t.a=function(e){return null!=e&&"object"==typeof e}},function(e,t,n){"use strict";var r=n(120);t.a=[function(e){return"function"==typeof e?Object(r.b)(e,"mapStateToProps"):void 0},function(e){return e?void 0:Object(r.a)(function(){return{}})}]},function(e,t,n){"use strict";n(121);var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function o(e,t,n){return r({},n,e,t)}t.a=[function(e){return"function"==typeof e?function(e){return function(t,n){n.displayName;var r=n.pure,o=n.areMergedPropsEqual,i=!1,a=void 0;return function(t,n,u){var s=e(t,n,u);return i?r&&o(s,a)||(a=s):(i=!0,a=s),a}}}(e):void 0},function(e){return e?void 0:function(){return o}}]},function(e,t,n){"use strict";t.a=function(e,t){var n=t.initMapStateToProps,i=t.initMapDispatchToProps,a=t.initMergeProps,u=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(t,["initMapStateToProps","initMapDispatchToProps","initMergeProps"]),s=n(e,u),c=i(e,u),l=a(e,u);0;return(u.pure?o:r)(s,c,l,e,u)};n(241);function r(e,t,n,r){return function(o,i){return n(e(o,i),t(r,i),i)}}function o(e,t,n,r,o){var i=o.areStatesEqual,a=o.areOwnPropsEqual,u=o.areStatePropsEqual,s=!1,c=void 0,l=void 0,f=void 
0,d=void 0,p=void 0;function h(o,s){var h=!a(s,l),m=!i(o,c);return c=o,l=s,h&&m?(f=e(c,l),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):h?(e.dependsOnOwnProps&&(f=e(c,l)),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):m?function(){var t=e(c,l),r=!u(t,f);return f=t,r&&(p=n(f,d,l)),p}():p}return function(o,i){return s?h(o,i):function(o,i){return f=e(c=o,l=i),d=t(r,l),p=n(f,d,l),s=!0,p}(o,i)}}},function(e,t,n){"use strict";n(72)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=y(n(5)),o=y(n(6)),i=y(n(7)),a=y(n(8)),u=y(n(9)),s=n(1),c=y(s),l=n(135),f=n(86),d=n(11),p=v(n(208)),h=v(n(517)),m=v(n(528));function v(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function y(e){return e&&e.__esModule?e:{default:e}}var g=function(e){function t(){return(0,o.default)(this,t),(0,a.default)(this,(t.__proto__||(0,r.default)(t)).apply(this,arguments))}return(0,u.default)(t,e),(0,i.default)(t,[{key:"render",value:function(){return c.default.createElement(l.ConnectedRouter,{history:this.props.history},c.default.createElement("div",null,c.default.createElement(d.Header,null),c.default.createElement("div",{className:"app"},c.default.createElement(f.Route,{path:"/metadata/",component:d.Sidebar}),c.default.createElement("div",{className:"body"},c.default.createElement(f.Route,{path:"/search/",component:h.Menu}),c.default.createElement(f.Route,{path:"/metadata/:hash/",component:p.Heading}),c.default.createElement(f.Switch,null,c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/summary/",component:p.Summary}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/mediaRecord/",component:p.MediaRecord}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/mediaInfo/",component:p.MediaInfo}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/keyframe/:frame/",component:p.KeyframeSingle}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/keyframe/",component:p.KeyframeList}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/coco/",component:p.Coco}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/places365/",component:p.Places365}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/sugarcube/",component:p.Sugarcube}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/",component:p.Summary}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/",render:function(){return c.default.createElement("div",{className:"notFound"},c.default.createElement("h4",null,"NOT FOUND"))}}),c.default.createElement(f.Route,{exact:!0,path:"/search/",component:h.Container}),c.default.createElement(f.Route,{exact:!0,path:"/search/keyframe/:verified/:hash/:frame/",component:h.Container}),c.default.createElement(f.Route,{exact:!0,path:"/search/keyframe/:hash/:frame/",component:h.Container}),c.default.createElement(f.Route,{exact:!0,path:"/search/browse/:hash/",component:h.Browse}),c.default.createElement(f.Route,{exact:!0,path:"/search/random/",component:h.Random}),c.default.createElement(f.Route,{exact:!0,path:"/search/review/",component:m.Saved})))),c.default.createElement(d.Footer,null)))}}]),t}(s.Component);t.default=g},function(e,t,n){n(244),e.exports=n(10).Object.getPrototypeOf},function(e,t,n){var r=n(41),o=n(123);n(124)("getPrototypeOf",function(){return function(e){return o(r(e))}})},function(e,t,n){n(246);var r=n(10).Object;e.exports=function(e,t,n){return 
r.defineProperty(e,t,n)}},function(e,t,n){var r=n(18);r(r.S+r.F*!n(24),"Object",{defineProperty:n(22).f})},function(e,t,n){e.exports={default:n(248),__esModule:!0}},function(e,t,n){n(44),n(58),e.exports=n(83).f("iterator")},function(e,t,n){var r=n(79),o=n(73);e.exports=function(e){return function(t,n){var i,a,u=String(o(t)),s=r(n),c=u.length;return s<0||s>=c?e?"":void 0:(i=u.charCodeAt(s))<55296||i>56319||s+1===c||(a=u.charCodeAt(s+1))<56320||a>57343?e?u.charAt(s):i:e?u.slice(s,s+2):a-56320+(i-55296<<10)+65536}}},function(e,t,n){"use strict";var r=n(80),o=n(43),i=n(57),a={};n(26)(a,n(14)("iterator"),function(){return this}),e.exports=function(e,t,n){e.prototype=r(a,{next:o(1,n)}),i(e,t+" Iterator")}},function(e,t,n){var r=n(22),o=n(20),i=n(45);e.exports=n(24)?Object.defineProperties:function(e,t){o(e);for(var n,a=i(t),u=a.length,s=0;u>s;)r.f(e,n=a[s++],t[n]);return e}},function(e,t,n){var r=n(37),o=n(81),i=n(253);e.exports=function(e){return function(t,n,a){var u,s=r(t),c=o(s.length),l=i(a,c);if(e&&n!=n){for(;c>l;)if((u=s[l++])!=u)return!0}else for(;c>l;l++)if((e||l in s)&&s[l]===n)return e||l||0;return!e&&-1}}},function(e,t,n){var r=n(79),o=Math.max,i=Math.min;e.exports=function(e,t){return(e=r(e))<0?o(e+t,0):i(e,t)}},function(e,t,n){"use strict";var r=n(255),o=n(256),i=n(36),a=n(37);e.exports=n(127)(Array,"Array",function(e,t){this._t=a(e),this._i=0,this._k=t},function(){var e=this._t,t=this._k,n=this._i++;return!e||n>=e.length?(this._t=void 0,o(1)):o(0,"keys"==t?n:"values"==t?e[n]:[n,e[n]])},"values"),i.Arguments=i.Array,r("keys"),r("values"),r("entries")},function(e,t){e.exports=function(){}},function(e,t){e.exports=function(e,t){return{value:t,done:!!e}}},function(e,t,n){e.exports={default:n(258),__esModule:!0}},function(e,t,n){n(259),n(134),n(264),n(265),e.exports=n(10).Symbol},function(e,t,n){"use strict";var r=n(13),o=n(25),i=n(24),a=n(18),u=n(128),s=n(260).KEY,c=n(35),l=n(75),f=n(57),d=n(55),p=n(14),h=n(83),m=n(84),v=n(261),y=n(262),g=n(20),_=n(23),b=n(37),w=n(77),x=n(43),E=n(80),O=n(263),S=n(133),T=n(22),k=n(45),R=S.f,j=T.f,P=O.f,C=r.Symbol,M=r.JSON,I=M&&M.stringify,A=p("_hidden"),D=p("toPrimitive"),N={}.propertyIsEnumerable,L=l("symbol-registry"),U=l("symbols"),F=l("op-symbols"),B=Object.prototype,W="function"==typeof C,Y=r.QObject,$=!Y||!Y.prototype||!Y.prototype.findChild,q=i&&c(function(){return 7!=E(j({},"a",{get:function(){return j(this,"a",{value:7}).a}})).a})?function(e,t,n){var r=R(B,t);r&&delete B[t],j(e,t,n),r&&e!==B&&j(B,t,r)}:j,H=function(e){var t=U[e]=E(C.prototype);return t._k=e,t},z=W&&"symbol"==typeof C.iterator?function(e){return"symbol"==typeof e}:function(e){return e instanceof C},G=function(e,t,n){return e===B&&G(F,t,n),g(e),t=w(t,!0),g(n),o(U,t)?(n.enumerable?(o(e,A)&&e[A][t]&&(e[A][t]=!1),n=E(n,{enumerable:x(0,!1)})):(o(e,A)||j(e,A,x(1,{})),e[A][t]=!0),q(e,t,n)):j(e,t,n)},V=function(e,t){g(e);for(var n,r=v(t=b(t)),o=0,i=r.length;i>o;)G(e,n=r[o++],t[n]);return e},X=function(e){var t=N.call(this,e=w(e,!0));return!(this===B&&o(U,e)&&!o(F,e))&&(!(t||!o(this,e)||!o(U,e)||o(this,A)&&this[A][e])||t)},K=function(e,t){if(e=b(e),t=w(t,!0),e!==B||!o(U,t)||o(F,t)){var n=R(e,t);return!n||!o(U,t)||o(e,A)&&e[A][t]||(n.enumerable=!0),n}},Q=function(e){for(var t,n=P(b(e)),r=[],i=0;n.length>i;)o(U,t=n[i++])||t==A||t==s||r.push(t);return r},J=function(e){for(var t,n=e===B,r=P(n?F:b(e)),i=[],a=0;r.length>a;)!o(U,t=r[a++])||n&&!o(B,t)||i.push(U[t]);return i};W||(u((C=function(){if(this instanceof C)throw TypeError("Symbol is not a constructor!");var 
e=d(arguments.length>0?arguments[0]:void 0),t=function(n){this===B&&t.call(F,n),o(this,A)&&o(this[A],e)&&(this[A][e]=!1),q(this,e,x(1,n))};return i&&$&&q(B,e,{configurable:!0,set:t}),H(e)}).prototype,"toString",function(){return this._k}),S.f=K,T.f=G,n(132).f=O.f=Q,n(59).f=X,n(85).f=J,i&&!n(42)&&u(B,"propertyIsEnumerable",X,!0),h.f=function(e){return H(p(e))}),a(a.G+a.W+a.F*!W,{Symbol:C});for(var Z="hasInstance,isConcatSpreadable,iterator,match,replace,search,species,split,toPrimitive,toStringTag,unscopables".split(","),ee=0;Z.length>ee;)p(Z[ee++]);for(var te=k(p.store),ne=0;te.length>ne;)m(te[ne++]);a(a.S+a.F*!W,"Symbol",{for:function(e){return o(L,e+="")?L[e]:L[e]=C(e)},keyFor:function(e){if(!z(e))throw TypeError(e+" is not a symbol!");for(var t in L)if(L[t]===e)return t},useSetter:function(){$=!0},useSimple:function(){$=!1}}),a(a.S+a.F*!W,"Object",{create:function(e,t){return void 0===t?E(e):V(E(e),t)},defineProperty:G,defineProperties:V,getOwnPropertyDescriptor:K,getOwnPropertyNames:Q,getOwnPropertySymbols:J}),M&&a(a.S+a.F*(!W||c(function(){var e=C();return"[null]"!=I([e])||"{}"!=I({a:e})||"{}"!=I(Object(e))})),"JSON",{stringify:function(e){for(var t,n,r=[e],o=1;arguments.length>o;)r.push(arguments[o++]);if(n=t=r[1],(_(t)||void 0!==e)&&!z(e))return y(t)||(t=function(e,t){if("function"==typeof n&&(t=n.call(this,e,t)),!z(t))return t}),r[1]=t,I.apply(M,r)}}),C.prototype[D]||n(26)(C.prototype,D,C.prototype.valueOf),f(C,"Symbol"),f(Math,"Math",!0),f(r.JSON,"JSON",!0)},function(e,t,n){var r=n(55)("meta"),o=n(23),i=n(25),a=n(22).f,u=0,s=Object.isExtensible||function(){return!0},c=!n(35)(function(){return s(Object.preventExtensions({}))}),l=function(e){a(e,r,{value:{i:"O"+ ++u,w:{}}})},f=e.exports={KEY:r,NEED:!1,fastKey:function(e,t){if(!o(e))return"symbol"==typeof e?e:("string"==typeof e?"S":"P")+e;if(!i(e,r)){if(!s(e))return"F";if(!t)return"E";l(e)}return e[r].i},getWeak:function(e,t){if(!i(e,r)){if(!s(e))return!0;if(!t)return!1;l(e)}return e[r].w},onFreeze:function(e){return c&&f.NEED&&s(e)&&!i(e,r)&&l(e),e}}},function(e,t,n){var r=n(45),o=n(85),i=n(59);e.exports=function(e){var t=r(e),n=o.f;if(n)for(var a,u=n(e),s=i.f,c=0;u.length>c;)s.call(e,a=u[c++])&&t.push(a);return t}},function(e,t,n){var r=n(46);e.exports=Array.isArray||function(e){return"Array"==r(e)}},function(e,t,n){var r=n(37),o=n(132).f,i={}.toString,a="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];e.exports.f=function(e){return a&&"[object Window]"==i.call(e)?function(e){try{return o(e)}catch(e){return a.slice()}}(e):o(r(e))}},function(e,t,n){n(84)("asyncIterator")},function(e,t,n){n(84)("observable")},function(e,t,n){e.exports={default:n(267),__esModule:!0}},function(e,t,n){n(268),e.exports=n(10).Object.setPrototypeOf},function(e,t,n){var r=n(18);r(r.S,"Object",{setPrototypeOf:n(269).set})},function(e,t,n){var r=n(23),o=n(20),i=function(e,t){if(o(e),!r(t)&&null!==t)throw TypeError(t+": can't set as prototype!")};e.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(e,t,r){try{(r=n(34)(Function.call,n(133).f(Object.prototype,"__proto__").set,2))(e,[]),t=!(e instanceof Array)}catch(e){t=!0}return function(e,n){return i(e,n),t?e.__proto__=n:r(e,n),e}}({},!1):void 0),check:i}},function(e,t,n){e.exports={default:n(271),__esModule:!0}},function(e,t,n){n(272);var r=n(10).Object;e.exports=function(e,t){return r.create(e,t)}},function(e,t,n){var r=n(18);r(r.S,"Object",{create:n(80)})},function(e,t,n){"use 
strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},i=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(60)),a=l(n(274)),u=l(n(281)),s=l(n(282)),c=l(n(283));function l(e){return e&&e.__esModule?e:{default:e}}var f=function(e){return o({},_("actions"),_("createSelectors")(e),{ConnectedRouter:_("createConnectedRouter")(e),connectRouter:_("createConnectRouter")(e),routerMiddleware:_("routerMiddleware")})};function d(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=_("createAll");var p=null;function h(){if(null===p){var e=d();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),p=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return p}function m(){var e=d();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function v(){var e=h(),t=m(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=d();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var y="__INTENTIONAL_UNDEFINED__",g={};function _(e){var t=v();if(void 0===t[e])return function(e){switch(e){case"actions":return function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.keys(e).filter(function(e){return"__get__"!==e&&"__set__"!==e&&"__reset__"!==e&&"__with__"!==e&&"__GetDependency__"!==e&&"__Rewire__"!==e&&"__ResetDependency__"!==e&&"__RewireAPI__"!==e}).reduce(function(t,n){return t[n]=e[n],t},{})}(i);case"createSelectors":return c.default;case"createConnectedRouter":return a.default;case"createConnectRouter":return u.default;case"routerMiddleware":return s.default;case"createAll":return f}return}(e);var n=t[e];return n===y?void 0:n}function b(e,t){var n=v();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?y:t,function(){w(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function w(e){var t=v();delete t[e],0==Object.keys(t).length&&delete m()[h]}function x(e){var t=v(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(g,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",_),e("__GetDependency__",_),e("__Rewire__",b),e("__set__",b),e("__reset__",w),e("__ResetDependency__",w),e("__with__",x)}();var E=void 0===f?"undefined":r(f);function O(e,t){Object.defineProperty(f,e,{value:t,enumerable:!1,configurable:!0})}"object"!==E&&"function"!==E||!Object.isExtensible(f)||(O("__get__",_),O("__GetDependency__",_),O("__Rewire__",b),O("__set__",b),O("__reset__",w),O("__ResetDependency__",w),O("__with__",x),O("__RewireAPI__",g)),t.__get__=_,t.__GetDependency__=_,t.__Rewire__=b,t.__set__=b,t.__ResetDependency__=w,t.__RewireAPI__=g}).call(t,n(3))},function(e,t,n){"use 
strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),i=n(1),a=f(i),u=f(n(12)),s=n(2),c=n(86),l=n(60);function f(e){return e&&e.__esModule?e:{default:e}}var d=function(e){var t=e.getIn,n=e.toJS,r=function(e){function r(e,o){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,r);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,(r.__proto__||Object.getPrototypeOf(r)).call(this,e));i.inTimeTravelling=!1,i.unsubscribe=o.store.subscribe(function(){var r=n(t(o.store.getState(),["router","location"])),a=r.pathname,u=r.search,s=r.hash,c=e.history.location,l=c.pathname,f=c.search,d=c.hash;l===a&&f===u&&d===s||(i.inTimeTravelling=!0,e.history.push({pathname:a,search:u,hash:s}))});var a=function(t,n){i.inTimeTravelling?i.inTimeTravelling=!1:e.onLocationChanged(t,n)};return i.unlisten=e.history.listen(a),a(e.history.location,e.history.action),i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(r,b("Component")),o(r,[{key:"componentWillUnmount",value:function(){this.unlisten(),this.unsubscribe()}},{key:"render",value:function(){var e=this.props,t=e.history,n=e.children;return b("React").createElement(b("Router"),{history:t},n)}}]),r}();r.contextTypes={store:b("PropTypes").shape({getState:b("PropTypes").func.isRequired,subscribe:b("PropTypes").func.isRequired}).isRequired},r.propTypes={history:b("PropTypes").shape({action:b("PropTypes").string.isRequired,listen:b("PropTypes").func.isRequired,location:b("PropTypes").object.isRequired,push:b("PropTypes").func.isRequired}).isRequired,location:b("PropTypes").oneOfType([b("PropTypes").object,b("PropTypes").string]).isRequired,action:b("PropTypes").string.isRequired,basename:b("PropTypes").string,children:b("PropTypes").oneOfType([b("PropTypes").func,b("PropTypes").node]),onLocationChanged:b("PropTypes").func.isRequired};return b("connect")(function(e){return{action:t(e,["router","action"]),location:t(e,["router","location"])}},function(e){return{onLocationChanged:function(t,n){return e(b("onLocationChanged")(t,n))}}})(r)};function p(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=b("createConnectedRouter");var h=null;function m(){if(null===h){var e=p();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),h=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return h}function v(){var e=p();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function y(){var e=m(),t=v(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var 
e=p();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var g="__INTENTIONAL_UNDEFINED__",_={};function b(e){var t=y();if(void 0===t[e])return function(e){switch(e){case"Component":return i.Component;case"PropTypes":return u.default;case"onLocationChanged":return l.onLocationChanged;case"connect":return s.connect;case"createConnectedRouter":return d;case"React":return a.default;case"Router":return c.Router}return}(e);var n=t[e];return n===g?void 0:n}function w(e,t){var n=y();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?g:t,function(){x(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function x(e){var t=y();delete t[e],0==Object.keys(t).length&&delete v()[m]}function E(e){var t=y(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(_,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",b),e("__GetDependency__",b),e("__Rewire__",w),e("__set__",w),e("__reset__",x),e("__ResetDependency__",x),e("__with__",E)}();var O=void 0===d?"undefined":r(d);function S(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}"object"!==O&&"function"!==O||!Object.isExtensible(d)||(S("__get__",b),S("__GetDependency__",b),S("__Rewire__",w),S("__set__",w),S("__reset__",x),S("__ResetDependency__",x),S("__with__",E),S("__RewireAPI__",_)),t.__get__=b,t.__GetDependency__=b,t.__Rewire__=w,t.__set__=w,t.__ResetDependency__=x,t.__RewireAPI__=_}).call(t,n(3))},function(e,t,n){"use strict";var r=n(61),o=n.n(r),i=n(19),a=n.n(i),u=n(62),s=n(47),c=n(87),l=n(137),f="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},d=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},p=function(){try{return window.history.state||{}}catch(e){return{}}};t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};a()(l.b,"Browser history needs a DOM");var t=window.history,n=Object(l.g)(),r=!Object(l.h)(),i=e.forceRefresh,h=void 0!==i&&i,m=e.getUserConfirmation,v=void 0===m?l.c:m,y=e.keyLength,g=void 0===y?6:y,_=e.basename?Object(s.g)(Object(s.a)(e.basename)):"",b=function(e){var t=e||{},n=t.key,r=t.state,i=window.location,a=i.pathname+i.search+i.hash;return o()(!_||Object(s.c)(a,_),'You are attempting to use a basename on a page whose URL path does not begin with the basename. 
Expected path "'+a+'" to begin with "'+_+'".'),_&&(a=Object(s.e)(a,_)),Object(u.a)(a,r,n)},w=function(){return Math.random().toString(36).substr(2,g)},x=Object(c.a)(),E=function(e){d(N,e),N.length=t.length,x.notifyListeners(N.location,N.action)},O=function(e){Object(l.d)(e)||k(b(e.state))},S=function(){k(b(p()))},T=!1,k=function(e){T?(T=!1,E()):x.confirmTransitionTo(e,"POP",v,function(t){t?E({action:"POP",location:e}):R(e)})},R=function(e){var t=N.location,n=P.indexOf(t.key);-1===n&&(n=0);var r=P.indexOf(e.key);-1===r&&(r=0);var o=n-r;o&&(T=!0,M(o))},j=b(p()),P=[j.key],C=function(e){return _+Object(s.b)(e)},M=function(e){t.go(e)},I=0,A=function(e){1===(I+=e)?(Object(l.a)(window,"popstate",O),r&&Object(l.a)(window,"hashchange",S)):0===I&&(Object(l.e)(window,"popstate",O),r&&Object(l.e)(window,"hashchange",S))},D=!1,N={length:t.length,action:"POP",location:j,createHref:C,push:function(e,r){o()(!("object"===(void 0===e?"undefined":f(e))&&void 0!==e.state&&void 0!==r),"You should avoid providing a 2nd state argument to push when the 1st argument is a location-like object that already has state; it is ignored");var i=Object(u.a)(e,r,w(),N.location);x.confirmTransitionTo(i,"PUSH",v,function(e){if(e){var r=C(i),a=i.key,u=i.state;if(n)if(t.pushState({key:a,state:u},null,r),h)window.location.href=r;else{var s=P.indexOf(N.location.key),c=P.slice(0,-1===s?0:s+1);c.push(i.key),P=c,E({action:"PUSH",location:i})}else o()(void 0===u,"Browser history cannot push state in browsers that do not support HTML5 history"),window.location.href=r}})},replace:function(e,r){o()(!("object"===(void 0===e?"undefined":f(e))&&void 0!==e.state&&void 0!==r),"You should avoid providing a 2nd state argument to replace when the 1st argument is a location-like object that already has state; it is ignored");var i=Object(u.a)(e,r,w(),N.location);x.confirmTransitionTo(i,"REPLACE",v,function(e){if(e){var r=C(i),a=i.key,u=i.state;if(n)if(t.replaceState({key:a,state:u},null,r),h)window.location.replace(r);else{var s=P.indexOf(N.location.key);-1!==s&&(P[s]=i.key),E({action:"REPLACE",location:i})}else o()(void 0===u,"Browser history cannot replace state in browsers that do not support HTML5 history"),window.location.replace(r)}})},go:M,goBack:function(){return M(-1)},goForward:function(){return M(1)},block:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=x.setPrompt(e);return D||(A(1),D=!0),function(){return D&&(D=!1,A(-1)),t()}},listen:function(e){var t=x.appendListener(e);return A(1),function(){A(-1),t()}}};return N}},function(e,t,n){"use strict";function r(e){return"/"===e.charAt(0)}function o(e,t){for(var n=t,r=n+1,o=e.length;r<o;n+=1,r+=1)e[n]=e[r];e.pop()}t.a=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=e&&e.split("/")||[],i=t&&t.split("/")||[],a=e&&r(e),u=t&&r(t),s=a||u;if(e&&r(e)?i=n:n.length&&(i.pop(),i=i.concat(n)),!i.length)return"/";var c=void 0;if(i.length){var l=i[i.length-1];c="."===l||".."===l||""===l}else c=!1;for(var f=0,d=i.length;d>=0;d--){var p=i[d];"."===p?o(i,d):".."===p?(o(i,d),f++):f&&(o(i,d),f--)}if(!s)for(;f--;f)i.unshift("..");!s||""===i[0]||i[0]&&r(i[0])||i.unshift("");var h=i.join("/");return c&&"/"!==h.substr(-1)&&(h+="/"),h}},function(e,t,n){"use strict";var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};t.a=function 
e(t,n){if(t===n)return!0;if(null==t||null==n)return!1;if(Array.isArray(t))return Array.isArray(n)&&t.length===n.length&&t.every(function(t,r){return e(t,n[r])});var o=void 0===t?"undefined":r(t);if(o!==(void 0===n?"undefined":r(n)))return!1;if("object"===o){var i=t.valueOf(),a=n.valueOf();if(i!==t||a!==n)return e(i,a);var u=Object.keys(t),s=Object.keys(n);return u.length===s.length&&u.every(function(r){return e(t[r],n[r])})}return!1}},function(e,t,n){"use strict";var r=n(61),o=n.n(r),i=n(19),a=n.n(i),u=n(62),s=n(47),c=n(87),l=n(137),f=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},d={hashbang:{encodePath:function(e){return"!"===e.charAt(0)?e:"!/"+Object(s.f)(e)},decodePath:function(e){return"!"===e.charAt(0)?e.substr(1):e}},noslash:{encodePath:s.f,decodePath:s.a},slash:{encodePath:s.a,decodePath:s.a}},p=function(){var e=window.location.href,t=e.indexOf("#");return-1===t?"":e.substring(t+1)},h=function(e){var t=window.location.href.indexOf("#");window.location.replace(window.location.href.slice(0,t>=0?t:0)+"#"+e)};t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};a()(l.b,"Hash history needs a DOM");var t=window.history,n=Object(l.f)(),r=e.getUserConfirmation,i=void 0===r?l.c:r,m=e.hashType,v=void 0===m?"slash":m,y=e.basename?Object(s.g)(Object(s.a)(e.basename)):"",g=d[v],_=g.encodePath,b=g.decodePath,w=function(){var e=b(p());return o()(!y||Object(s.c)(e,y),'You are attempting to use a basename on a page whose URL path does not begin with the basename. Expected path "'+e+'" to begin with "'+y+'".'),y&&(e=Object(s.e)(e,y)),Object(u.a)(e)},x=Object(c.a)(),E=function(e){f(L,e),L.length=t.length,x.notifyListeners(L.location,L.action)},O=!1,S=null,T=function(){var e=p(),t=_(e);if(e!==t)h(t);else{var n=w(),r=L.location;if(!O&&Object(u.b)(r,n))return;if(S===Object(s.b)(n))return;S=null,k(n)}},k=function(e){O?(O=!1,E()):x.confirmTransitionTo(e,"POP",i,function(t){t?E({action:"POP",location:e}):R(e)})},R=function(e){var t=L.location,n=M.lastIndexOf(Object(s.b)(t));-1===n&&(n=0);var r=M.lastIndexOf(Object(s.b)(e));-1===r&&(r=0);var o=n-r;o&&(O=!0,I(o))},j=p(),P=_(j);j!==P&&h(P);var C=w(),M=[Object(s.b)(C)],I=function(e){o()(n,"Hash history go(n) causes a full page reload in this browser"),t.go(e)},A=0,D=function(e){1===(A+=e)?Object(l.a)(window,"hashchange",T):0===A&&Object(l.e)(window,"hashchange",T)},N=!1,L={length:t.length,action:"POP",location:C,createHref:function(e){return"#"+_(y+Object(s.b)(e))},push:function(e,t){o()(void 0===t,"Hash history cannot push state; it is ignored");var n=Object(u.a)(e,void 0,void 0,L.location);x.confirmTransitionTo(n,"PUSH",i,function(e){if(e){var t=Object(s.b)(n),r=_(y+t);if(p()!==r){S=t,function(e){window.location.hash=e}(r);var i=M.lastIndexOf(Object(s.b)(L.location)),a=M.slice(0,-1===i?0:i+1);a.push(t),M=a,E({action:"PUSH",location:n})}else o()(!1,"Hash history cannot PUSH the same path; a new entry will not be added to the history stack"),E()}})},replace:function(e,t){o()(void 0===t,"Hash history cannot replace state; it is ignored");var n=Object(u.a)(e,void 0,void 0,L.location);x.confirmTransitionTo(n,"REPLACE",i,function(e){if(e){var t=Object(s.b)(n),r=_(y+t);p()!==r&&(S=t,h(r));var o=M.indexOf(Object(s.b)(L.location));-1!==o&&(M[o]=t),E({action:"REPLACE",location:n})}})},go:I,goBack:function(){return I(-1)},goForward:function(){return I(1)},block:function(){var e=arguments.length>0&&void 
0!==arguments[0]&&arguments[0],t=x.setPrompt(e);return N||(D(1),N=!0),function(){return N&&(N=!1,D(-1)),t()}},listen:function(e){var t=x.appendListener(e);return D(1),function(){D(-1),t()}}};return L}},function(e,t,n){"use strict";var r=n(61),o=n.n(r),i=n(47),a=n(62),u=n(87),s="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},l=function(e,t,n){return Math.min(Math.max(e,t),n)};t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.getUserConfirmation,n=e.initialEntries,r=void 0===n?["/"]:n,f=e.initialIndex,d=void 0===f?0:f,p=e.keyLength,h=void 0===p?6:p,m=Object(u.a)(),v=function(e){c(x,e),x.length=x.entries.length,m.notifyListeners(x.location,x.action)},y=function(){return Math.random().toString(36).substr(2,h)},g=l(d,0,r.length-1),_=r.map(function(e){return"string"==typeof e?Object(a.a)(e,void 0,y()):Object(a.a)(e,void 0,e.key||y())}),b=i.b,w=function(e){var n=l(x.index+e,0,x.entries.length-1),r=x.entries[n];m.confirmTransitionTo(r,"POP",t,function(e){e?v({action:"POP",location:r,index:n}):v()})},x={length:_.length,action:"POP",location:_[g],index:g,entries:_,createHref:b,push:function(e,n){o()(!("object"===(void 0===e?"undefined":s(e))&&void 0!==e.state&&void 0!==n),"You should avoid providing a 2nd state argument to push when the 1st argument is a location-like object that already has state; it is ignored");var r=Object(a.a)(e,n,y(),x.location);m.confirmTransitionTo(r,"PUSH",t,function(e){if(e){var t=x.index+1,n=x.entries.slice(0);n.length>t?n.splice(t,n.length-t,r):n.push(r),v({action:"PUSH",location:r,index:t,entries:n})}})},replace:function(e,n){o()(!("object"===(void 0===e?"undefined":s(e))&&void 0!==e.state&&void 0!==n),"You should avoid providing a 2nd state argument to replace when the 1st argument is a location-like object that already has state; it is ignored");var r=Object(a.a)(e,n,y(),x.location);m.confirmTransitionTo(r,"REPLACE",t,function(e){e&&(x.entries[x.index]=r,v({action:"REPLACE",location:r}))})},go:w,goBack:function(){return w(-1)},goForward:function(){return w(1)},canGo:function(e){var t=x.index+e;return t>=0&&t<x.entries.length},block:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return m.setPrompt(e)},listen:function(e){return m.appendListener(e)}};return x}},function(e,t){e.exports=Array.isArray||function(e){return"[object Array]"==Object.prototype.toString.call(e)}},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=n(60),i=function(e){var t=e.filterNotRouter,n=e.fromJS,r=e.getIn,o=e.merge,i=e.setIn;return function(e){var a=n({location:e.location,action:e.action});return function(e){return function(n,u){var s=a;n&&(s=r(n,["router"])||s,n=t(n));var c=e(n,u);return i(c,["router"],function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.type,r=t.payload;return n===p("LOCATION_CHANGE")?o(e,r):e}(s,u))}}}};function a(){try{if(e)return 
e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("createConnectRouter");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"LOCATION_CHANGE":return o.LOCATION_CHANGE;case"createConnectRouter":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var n=l();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":r(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=n(60);var i=function(e){return function(t){return function(t){return function(n){if(n.type!==p("CALL_HISTORY_METHOD"))return t(n);var r=n.payload,o=r.method,i=r.args;e[o].apply(e,function(e){if(Array.isArray(e)){for(var t=0,n=Array(e.length);t<e.length;t++)n[t]=e[t];return n}return Array.from(e)}(i))}}}};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("routerMiddleware");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"CALL_HISTORY_METHOD":return 
o.CALL_HISTORY_METHOD;case"routerMiddleware":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var n=l();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":r(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=n(86),i=function(e){var t=e.getIn,n=e.toJS,r=function(e){return n(t(e,["router","location"]))};return{getLocation:r,getAction:function(e){return n(t(e,["router","action"]))},createMatchSelector:function(e){var t=null,n=null;return function(o){var i=(r(o)||{}).pathname;if(i===t)return n;t=i;var a=p("matchPath")(i,e);return a&&n&&a.url===n.url||(n=a),n}}}};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("createSelectors");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"matchPath":return o.matchPath;case"createSelectors":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var n=l();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function 
e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":r(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},i=u(n(285)),a=u(n(286));function u(e){return e&&e.__esModule?e:{default:e}}var s={filterNotRouter:function(e){e.router;return function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["router"])},fromJS:function(e){return e},getIn:v("getIn"),merge:function(e,t){return o({},e,t)},setIn:v("setIn"),toJS:function(e){return e}};function c(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=v("structure");var l=null;function f(){if(null===l){var e=c();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),l=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return l}function d(){var e=c();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function p(){var e=f(),t=d(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=c();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var h="__INTENTIONAL_UNDEFINED__",m={};function v(e){var t=p();if(void 0===t[e])return function(e){switch(e){case"getIn":return i.default;case"setIn":return a.default;case"structure":return s}return}(e);var n=t[e];return n===h?void 0:n}function y(e,t){var n=p();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?h:t,function(){g(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function g(e){var t=p();delete t[e],0==Object.keys(t).length&&delete d()[f]}function _(e){var t=p(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(m,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",v),e("__GetDependency__",v),e("__Rewire__",y),e("__set__",y),e("__reset__",g),e("__ResetDependency__",g),e("__with__",_)}();var b=void 0===s?"undefined":r(s);function 
w(e,t){Object.defineProperty(s,e,{value:t,enumerable:!1,configurable:!0})}"object"!==b&&"function"!==b||!Object.isExtensible(s)||(w("__get__",v),w("__GetDependency__",v),w("__Rewire__",y),w("__set__",y),w("__reset__",g),w("__ResetDependency__",g),w("__with__",_),w("__RewireAPI__",m)),t.__get__=v,t.__GetDependency__=v,t.__Rewire__=y,t.__set__=y,t.__ResetDependency__=g,t.__RewireAPI__=m}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r=function(e,t){if(!e)return e;var n=t.length;if(n){for(var r=e,o=0;o<n&&r;++o)r=r[t[o]];return r}};function o(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=f("getIn");var i=null;function a(){if(null===i){var e=o();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),i=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return i}function u(){var e=o();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function s(){var e=a(),t=u(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=o();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var c="__INTENTIONAL_UNDEFINED__",l={};function f(e){var t=s();if(void 0===t[e])return function(e){switch(e){case"getIn":return r}return}(e);var n=t[e];return n===c?void 0:n}function d(e,t){var r=s();if("object"!==(void 0===e?"undefined":n(e)))return r[e]=void 0===t?c:t,function(){p(e)};Object.keys(e).forEach(function(t){r[t]=e[t]})}function p(e){var t=s();delete t[e],0==Object.keys(t).length&&delete u()[a]}function h(e){var t=s(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(l,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",f),e("__GetDependency__",f),e("__Rewire__",d),e("__set__",d),e("__reset__",p),e("__ResetDependency__",p),e("__with__",h)}();var m=void 0===r?"undefined":n(r);function v(e,t){Object.defineProperty(r,e,{value:t,enumerable:!1,configurable:!0})}"object"!==m&&"function"!==m||!Object.isExtensible(r)||(v("__get__",f),v("__GetDependency__",f),v("__Rewire__",d),v("__set__",d),v("__reset__",p),v("__ResetDependency__",p),v("__with__",h),v("__RewireAPI__",l)),t.__get__=f,t.__GetDependency__=f,t.__Rewire__=d,t.__set__=d,t.__ResetDependency__=p,t.__RewireAPI__=l}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var o=function(e,t,n,o){if(o>=n.length)return t;var i=n[o],a=p("setInWithPath")(e&&e[i],t,n,o+1);if(!e){var u=isNaN(i)?{}:[];return u[i]=a,u}if(Array.isArray(e)){var s=[].concat(e);return s[i]=a,s}return r({},e,function(e,t,n){return t in 
e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}({},i,a))},i=function(e,t,n){return p("setInWithPath")(e,n,t,0)};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("setIn");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"setInWithPath":return o;case"setIn":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var r=l();if("object"!==(void 0===e?"undefined":n(e)))return r[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){r[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":n(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(e){return e&&e.__esModule?e:{default:e}}(n(90)),o=(n(144),n(292));n(315);t.default=(0,o.connect)(function(e){return{auth:e.auth,isAuthenticated:e.auth.isAuthenticated}},function(e){return{}})(function(e){return e.isAuthenticated||"0.0.0.0"===window.location.hostname?function(e){return r.default.createElement("header",{className:"navbar"},r.default.createElement("section",{className:"navbar-section first-navbar-section"},r.default.createElement("a",{href:"/"},r.default.createElement("img",{className:"menuToggle",alt:"logo",src:"/static/vframe-logo.png"})),r.default.createElement("a",{href:"/",className:"vcat-btn"},r.default.createElement("b",null,"VCAT")),r.default.createElement("a",{href:"/categories/"},"Categories"),r.default.createElement("a",{href:"/images/new/"},"Upload"),r.default.createElement("a",{href:"/search/"},"Search")),r.default.createElement("section",{className:"navbar-section last-navbar-section"},r.default.createElement("a",{href:"/stats/hierarchy.html",className:""},"Stats"),r.default.createElement("a",{href:"/help/"},"Help"),r.default.createElement("span",{className:"login-btn logged-in capitalize"},e.auth.user.username,r.default.createElement("a",{href:"/accounts/logout/"},"Logout")),r.default.createElement("a",{href:"/groups/user/"},"My 
Assignments")))}(e):r.default.createElement("header",{className:"navbar"},r.default.createElement("section",{className:"navbar-section"},r.default.createElement("a",{href:"/"},r.default.createElement("img",{className:"menuToggle",alt:"logo",src:"/static/vframe-logo.png"})),r.default.createElement("a",{href:"/",className:"vcat-btn"},r.default.createElement("b",null,"VCAT"))),r.default.createElement("section",{className:"navbar-section last-navbar-section"},r.default.createElement("span",{className:""},r.default.createElement("a",{href:"/accounts/login",className:""},"Login"))))})},function(e,t,n){"use strict"; -/** @license React v16.5.2 - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var r=n(289),o="function"==typeof Symbol&&Symbol.for,i=o?Symbol.for("react.element"):60103,a=o?Symbol.for("react.portal"):60106,u=o?Symbol.for("react.fragment"):60107,s=o?Symbol.for("react.strict_mode"):60108,c=o?Symbol.for("react.profiler"):60114,l=o?Symbol.for("react.provider"):60109,f=o?Symbol.for("react.context"):60110,d=o?Symbol.for("react.async_mode"):60111,p=o?Symbol.for("react.forward_ref"):60112;o&&Symbol.for("react.placeholder");var h="function"==typeof Symbol&&Symbol.iterator;function m(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,u){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,r,o,i,a,u],c=0;(e=Error(t.replace(/%s/g,function(){return s[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. 
",n)}var v={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},y={};function g(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}function _(){}function b(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}g.prototype.isReactComponent={},g.prototype.setState=function(e,t){"object"!=typeof e&&"function"!=typeof e&&null!=e&&m("85"),this.updater.enqueueSetState(this,e,t,"setState")},g.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},_.prototype=g.prototype;var w=b.prototype=new _;w.constructor=b,r(w,g.prototype),w.isPureReactComponent=!0;var x={current:null,currentDispatcher:null},E=Object.prototype.hasOwnProperty,O={key:!0,ref:!0,__self:!0,__source:!0};function S(e,t,n){var r=void 0,o={},a=null,u=null;if(null!=t)for(r in void 0!==t.ref&&(u=t.ref),void 0!==t.key&&(a=""+t.key),t)E.call(t,r)&&!O.hasOwnProperty(r)&&(o[r]=t[r]);var s=arguments.length-2;if(1===s)o.children=n;else if(1<s){for(var c=Array(s),l=0;l<s;l++)c[l]=arguments[l+2];o.children=c}if(e&&e.defaultProps)for(r in s=e.defaultProps)void 0===o[r]&&(o[r]=s[r]);return{$$typeof:i,type:e,key:a,ref:u,props:o,_owner:x.current}}function T(e){return"object"==typeof e&&null!==e&&e.$$typeof===i}var k=/\/+/g,R=[];function j(e,t,n,r){if(R.length){var o=R.pop();return o.result=e,o.keyPrefix=t,o.func=n,o.context=r,o.count=0,o}return{result:e,keyPrefix:t,func:n,context:r,count:0}}function P(e){e.result=null,e.keyPrefix=null,e.func=null,e.context=null,e.count=0,10>R.length&&R.push(e)}function C(e,t,n){return null==e?0:function e(t,n,r,o){var u=typeof t;"undefined"!==u&&"boolean"!==u||(t=null);var s=!1;if(null===t)s=!0;else switch(u){case"string":case"number":s=!0;break;case"object":switch(t.$$typeof){case i:case a:s=!0}}if(s)return r(o,t,""===n?"."+M(t,0):n),1;if(s=0,n=""===n?".":n+":",Array.isArray(t))for(var c=0;c<t.length;c++){var l=n+M(u=t[c],c);s+=e(u,l,r,o)}else if(l=null===t||"object"!=typeof t?null:"function"==typeof(l=h&&t[h]||t["@@iterator"])?l:null,"function"==typeof l)for(t=l.call(t),c=0;!(u=t.next()).done;)s+=e(u=u.value,l=n+M(u,c++),r,o);else"object"===u&&m("31","[object Object]"==(r=""+t)?"object with keys {"+Object.keys(t).join(", ")+"}":r,"");return s}(e,"",t,n)}function M(e,t){return"object"==typeof e&&null!==e&&null!=e.key?function(e){var t={"=":"=0",":":"=2"};return"$"+(""+e).replace(/[=:]/g,function(e){return t[e]})}(e.key):t.toString(36)}function I(e,t){e.func.call(e.context,t,e.count++)}function A(e,t,n){var r=e.result,o=e.keyPrefix;e=e.func.call(e.context,t,e.count++),Array.isArray(e)?D(e,r,n,function(e){return e}):null!=e&&(T(e)&&(e=function(e,t){return{$$typeof:i,type:e.type,key:t,ref:e.ref,props:e.props,_owner:e._owner}}(e,o+(!e.key||t&&t.key===e.key?"":(""+e.key).replace(k,"$&/")+"/")+n)),r.push(e))}function D(e,t,n,r,o){var i="";null!=n&&(i=(""+n).replace(k,"$&/")+"/"),C(e,A,t=j(t,i,r,o)),P(t)}var N={Children:{map:function(e,t,n){if(null==e)return e;var r=[];return D(e,r,null,t,n),r},forEach:function(e,t,n){if(null==e)return e;C(e,I,t=j(null,null,t,n)),P(t)},count:function(e){return C(e,function(){return null},null)},toArray:function(e){var t=[];return D(e,t,null,function(e){return e}),t},only:function(e){return T(e)||m("143"),e}},createRef:function(){return{current:null}},Component:g,PureComponent:b,createContext:function(e,t){return void 
0===t&&(t=null),(e={$$typeof:f,_calculateChangedBits:t,_currentValue:e,_currentValue2:e,Provider:null,Consumer:null,unstable_read:null}).Provider={$$typeof:l,_context:e},e.Consumer=e,e.unstable_read=function(e,t){var n=x.currentDispatcher;return null===n&&m("277"),n.readContext(e,t)}.bind(null,e),e},forwardRef:function(e){return{$$typeof:p,render:e}},Fragment:u,StrictMode:s,unstable_AsyncMode:d,unstable_Profiler:c,createElement:S,cloneElement:function(e,t,n){(null===e||void 0===e)&&m("267",e);var o=void 0,a=r({},e.props),u=e.key,s=e.ref,c=e._owner;if(null!=t){void 0!==t.ref&&(s=t.ref,c=x.current),void 0!==t.key&&(u=""+t.key);var l=void 0;for(o in e.type&&e.type.defaultProps&&(l=e.type.defaultProps),t)E.call(t,o)&&!O.hasOwnProperty(o)&&(a[o]=void 0===t[o]&&void 0!==l?l[o]:t[o])}if(1===(o=arguments.length-2))a.children=n;else if(1<o){l=Array(o);for(var f=0;f<o;f++)l[f]=arguments[f+2];a.children=l}return{$$typeof:i,type:e.type,key:u,ref:s,props:a,_owner:c}},createFactory:function(e){var t=S.bind(null,e);return t.type=e,t},isValidElement:T,version:"16.5.2",__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{ReactCurrentOwner:x,assign:r}},L={default:N},U=L&&N||L;e.exports=U.default||U},function(e,t,n){"use strict"; -/* -object-assign -(c) Sindre Sorhus -@license MIT -*/var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,a,u=function(e){if(null===e||void 0===e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}(e),s=1;s<arguments.length;s++){for(var c in n=Object(arguments[s]))o.call(n,c)&&(u[c]=n[c]);if(r){a=r(n);for(var l=0;l<a.length;l++)i.call(n,a[l])&&(u[a[l]]=n[a[l]])}}return u}},function(e,t,n){"use strict";(function(e,r){var o,i=n(291);o="undefined"!=typeof self?self:"undefined"!=typeof window?window:void 0!==e?e:r;var a=Object(i.a)(o);t.a=a}).call(t,n(3),n(119)(e))},function(e,t,n){"use strict";t.a=function(e){var t,n=e.Symbol;"function"==typeof n?n.observable?t=n.observable:(t=n("observable"),n.observable=t):t="@@observable";return t}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(293),o=n(147),i=n(299);n.d(t,"Provider",function(){return r.b}),n.d(t,"createProvider",function(){return r.a}),n.d(t,"connectAdvanced",function(){return o.a}),n.d(t,"connect",function(){return i.a})},function(e,t,n){"use strict";t.a=u;var r=n(90),o=(n.n(r),n(145)),i=n.n(o),a=n(146);n(91);function u(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"store",n=arguments[1]||t+"Subscription",o=function(e){function o(n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.call(this,n,r));return i[t]=n.store,i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof 
t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,e),o.prototype.getChildContext=function(){var e;return(e={})[t]=this[t],e[n]=null,e},o.prototype.render=function(){return r.Children.only(this.props.children)},o}(r.Component);return o.propTypes={store:a.a.isRequired,children:i.a.element.isRequired},o.childContextTypes=((e={})[t]=a.a.isRequired,e[n]=a.b,e),o}t.b=u()},function(e,t,n){"use strict";var r=n(295);function o(){}e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var u=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types");throw u.name="Invariant Violation",u}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t};return n.checkPropTypes=o,n.PropTypes=n,n}},function(e,t,n){"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},function(e,t,n){"use strict";var r={childContextTypes:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i=Object.defineProperty,a=Object.getOwnPropertyNames,u=Object.getOwnPropertySymbols,s=Object.getOwnPropertyDescriptor,c=Object.getPrototypeOf,l=c&&c(Object);e.exports=function e(t,n,f){if("string"!=typeof n){if(l){var d=c(n);d&&d!==l&&e(t,d,f)}var p=a(n);u&&(p=p.concat(u(n)));for(var h=0;h<p.length;++h){var m=p[h];if(!(r[m]||o[m]||f&&f[m])){var v=s(n,m);try{i(t,m,v)}catch(e){}}}return t}return t}},function(e,t,n){"use strict";e.exports=function(e,t,n,r,o,i,a,u){if(!e){var s;if(void 0===t)s=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,o,i,a,u],l=0;(s=new Error(t.replace(/%s/g,function(){return c[l++]}))).name="Invariant Violation"}throw s.framesToPop=1,s}}},function(e,t,n){"use strict";n.d(t,"a",function(){return i});var r=null,o={notify:function(){}};var i=function(){function e(t,n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.store=t,this.parentSub=n,this.onStateChange=r,this.unsubscribe=null,this.listeners=o}return e.prototype.addNestedSub=function(e){return this.trySubscribe(),this.listeners.subscribe(e)},e.prototype.notifyNestedSubs=function(){this.listeners.notify()},e.prototype.isSubscribed=function(){return Boolean(this.unsubscribe)},e.prototype.trySubscribe=function(){this.unsubscribe||(this.unsubscribe=this.parentSub?this.parentSub.addNestedSub(this.onStateChange):this.store.subscribe(this.onStateChange),this.listeners=function(){var e=[],t=[];return{clear:function(){t=r,e=r},notify:function(){for(var n=e=t,r=0;r<n.length;r++)n[r]()},get:function(){return t},subscribe:function(n){var o=!0;return t===e&&(t=e.slice()),t.push(n),function(){o&&e!==r&&(o=!1,t===e&&(t=e.slice()),t.splice(t.indexOf(n),1))}}}}())},e.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null,this.listeners.clear(),this.listeners=o)},e}()},function(e,t,n){"use strict";var r=n(147),o=n(300),i=n(301),a=n(311),u=n(312),s=n(313),c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var 
n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function l(e,t,n){for(var r=t.length-1;r>=0;r--){var o=t[r](e);if(o)return o}return function(t,r){throw new Error("Invalid value of type "+typeof e+" for "+n+" argument when connecting component "+r.wrappedComponentName+".")}}function f(e,t){return e===t}t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.connectHOC,n=void 0===t?r.a:t,d=e.mapStateToPropsFactories,p=void 0===d?a.a:d,h=e.mapDispatchToPropsFactories,m=void 0===h?i.a:h,v=e.mergePropsFactories,y=void 0===v?u.a:v,g=e.selectorFactory,_=void 0===g?s.a:g;return function(e,t,r){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},a=i.pure,u=void 0===a||a,s=i.areStatesEqual,d=void 0===s?f:s,h=i.areOwnPropsEqual,v=void 0===h?o.a:h,g=i.areStatePropsEqual,b=void 0===g?o.a:g,w=i.areMergedPropsEqual,x=void 0===w?o.a:w,E=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(i,["pure","areStatesEqual","areOwnPropsEqual","areStatePropsEqual","areMergedPropsEqual"]),O=l(e,p,"mapStateToProps"),S=l(t,m,"mapDispatchToProps"),T=l(r,y,"mergeProps");return n(_,c({methodName:"connect",getDisplayName:function(e){return"Connect("+e+")"},shouldHandleStateChanges:Boolean(e),initMapStateToProps:O,initMapDispatchToProps:S,initMergeProps:T,pure:u,areStatesEqual:d,areOwnPropsEqual:v,areStatePropsEqual:b,areMergedPropsEqual:x},E))}}()},function(e,t,n){"use strict";t.a=function(e,t){if(o(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),i=Object.keys(t);if(n.length!==i.length)return!1;for(var a=0;a<n.length;a++)if(!r.call(t,n[a])||!o(e[n[a]],t[n[a]]))return!1;return!0};var r=Object.prototype.hasOwnProperty;function o(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}},function(e,t,n){"use strict";var r=n(144),o=n(148);t.a=[function(e){return"function"==typeof e?Object(o.b)(e,"mapDispatchToProps"):void 0},function(e){return e?void 0:Object(o.a)(function(e){return{dispatch:e}})},function(e){return e&&"object"==typeof e?Object(o.a)(function(t){return Object(r.bindActionCreators)(e,t)}):void 0}]},function(e,t,n){"use strict";var r=n(303),o=n(308),i=n(310),a="[object Object]",u=Function.prototype,s=Object.prototype,c=u.toString,l=s.hasOwnProperty,f=c.call(Object);t.a=function(e){if(!Object(i.a)(e)||Object(r.a)(e)!=a)return!1;var t=Object(o.a)(e);if(null===t)return!0;var n=l.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&c.call(n)==f}},function(e,t,n){"use strict";var r=n(150),o=n(306),i=n(307),a="[object Null]",u="[object Undefined]",s=r.a?r.a.toStringTag:void 0;t.a=function(e){return null==e?void 0===e?u:a:s&&s in Object(e)?Object(o.a)(e):Object(i.a)(e)}},function(e,t,n){"use strict";var r=n(305),o="object"==typeof self&&self&&self.Object===Object&&self,i=r.a||o||Function("return this")();t.a=i},function(e,t,n){"use strict";(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.a=n}).call(t,n(3))},function(e,t,n){"use strict";var r=n(150),o=Object.prototype,i=o.hasOwnProperty,a=o.toString,u=r.a?r.a.toStringTag:void 0;t.a=function(e){var t=i.call(e,u),n=e[u];try{e[u]=void 0;var r=!0}catch(e){}var o=a.call(e);return r&&(t?e[u]=n:delete e[u]),o}},function(e,t,n){"use strict";var r=Object.prototype.toString;t.a=function(e){return r.call(e)}},function(e,t,n){"use strict";var r=n(309),o=Object(r.a)(Object.getPrototypeOf,Object);t.a=o},function(e,t,n){"use 
strict";t.a=function(e,t){return function(n){return e(t(n))}}},function(e,t,n){"use strict";t.a=function(e){return null!=e&&"object"==typeof e}},function(e,t,n){"use strict";var r=n(148);t.a=[function(e){return"function"==typeof e?Object(r.b)(e,"mapStateToProps"):void 0},function(e){return e?void 0:Object(r.a)(function(){return{}})}]},function(e,t,n){"use strict";n(149);var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function o(e,t,n){return r({},n,e,t)}t.a=[function(e){return"function"==typeof e?function(e){return function(t,n){n.displayName;var r=n.pure,o=n.areMergedPropsEqual,i=!1,a=void 0;return function(t,n,u){var s=e(t,n,u);return i?r&&o(s,a)||(a=s):(i=!0,a=s),a}}}(e):void 0},function(e){return e?void 0:function(){return o}}]},function(e,t,n){"use strict";t.a=function(e,t){var n=t.initMapStateToProps,i=t.initMapDispatchToProps,a=t.initMergeProps,u=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(t,["initMapStateToProps","initMapDispatchToProps","initMergeProps"]),s=n(e,u),c=i(e,u),l=a(e,u);0;return(u.pure?o:r)(s,c,l,e,u)};n(314);function r(e,t,n,r){return function(o,i){return n(e(o,i),t(r,i),i)}}function o(e,t,n,r,o){var i=o.areStatesEqual,a=o.areOwnPropsEqual,u=o.areStatePropsEqual,s=!1,c=void 0,l=void 0,f=void 0,d=void 0,p=void 0;function h(o,s){var h=!a(s,l),m=!i(o,c);return c=o,l=s,h&&m?(f=e(c,l),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):h?(e.dependsOnOwnProps&&(f=e(c,l)),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):m?function(){var t=e(c,l),r=!u(t,f);return f=t,r&&(p=n(f,d,l)),p}():p}return function(o,i){return s?h(o,i):function(o,i){return f=e(c=o,l=i),d=t(r,l),p=n(f,d,l),s=!0,p}(o,i)}}},function(e,t,n){"use strict";n(91)},function(e,t,n){var r=n(316);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".navbar {\n height: 50px;\n color:#aaa;\n font-size: 14px;\n}\n.navbar b {\n font-weight: 900;\n}\n.navbar {\n background: #11F;\n color: white;\n font-family: 'Helvetica', sans-serif;\n}\n.navbar a {\n color: rgba(255,255,255,0.89);\n text-decoration: none;\n line-height: 18px;\n font-size: 14px;\n font-weight: 500;\n}\n.navbar section.first-navbar-section * {\n font-weight: bold;\n}\n.navbar section.first-navbar-section > * {\n padding: 5px 8px 5px 8px;\n}\n.navbar section > * {\n padding: 5px 5px;\n}\n.navbar section.first-navbar-section > .vcat-btn {\n font-size: 16px;\n padding-left: 0;\n}\n.navbar .btn-link:focus,\n.navbar .btn-link:hover,\n.navbar .btn-link:active,\n.navbar a:focus,\n.navbar a:hover,\n.navbar a:active {\n text-decoration: none;\n color: white;\n}\n.menubar a:focus,\n.menubar a:hover,\n.menubar a:active {\n color: white;\n}\n.menuToggle {\n width: 26px;\n height: 26px;\n cursor: pointer;\n margin: 0 0 0 5px;\n line-height: 1;\n}\n.navbar a.navbar-brand {\n font-size: .8rem;\n}\n\n.navbar .last-navbar-section {\n padding-right: 8px;\n}\n.navbar .logout {\n padding: 0 .25rem;\n}\n.navbar .logged-in {\n border-left: 1px solid #99f;\n margin-left: .25rem;\n padding: .25rem .25rem .25rem .75rem;\n user-select: none;\n text-transform: capitalize;\n color: rgba(255,255,255,0.89);\n position: relative;\n min-width: 65px;\n}\n.navbar .logged-in a {\n position: absolute;\n top: 0; left: 0;\n width: 100%;\n height: 100%;\n display: flex;\n 
justify-content: center;\n align-items: center;\n background: #11f;\n opacity: 0;\n transition: 0.1s all;\n}\n.navbar .logged-in:hover a {\n opacity: 1;\n}\n",""])},function(e,t){e.exports=function(e){var t="undefined"!=typeof window&&window.location;if(!t)throw new Error("fixUrls requires window.location");if(!e||"string"!=typeof e)return e;var n=t.protocol+"//"+t.host,r=n+t.pathname.replace(/\/[^\/]*$/,"/");return e.replace(/url\s*\(((?:[^)(]|\((?:[^)(]+|\([^)(]*\))*\))*)\)/gi,function(e,t){var o,i=t.trim().replace(/^"(.*)"$/,function(e,t){return t}).replace(/^'(.*)'$/,function(e,t){return t});return/^(#|data:|http:\/\/|https:\/\/|file:\/\/\/|\s*$)/i.test(i)?e:(o=0===i.indexOf("//")?i:0===i.indexOf("/")?n+i:r+i.replace(/^\.\//,""),"url("+JSON.stringify(o)+")")})}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.to,n=e.className,i=void 0===n?"navlink":n,a=e.children;return r.default.createElement("span",{className:i},r.default.createElement(o.NavLink,{to:t},a))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(16)},function(e,t,n){"use strict";var r=n(151),o=n.n(r),i=n(1),a=n.n(i),u=n(12),s=n.n(u),c=n(27),l=n(92);function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.history=Object(c.createBrowserHistory)(r.props),f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){o()(!this.props.history,"<BrowserRouter> ignores the history prop. To use a custom history, use `import { Router }` instead of `import { BrowserRouter as Router }`.")},t.prototype.render=function(){return a.a.createElement(l.a,{history:this.history,children:this.props.children})},t}(a.a.Component);d.propTypes={basename:s.a.string,forceRefresh:s.a.bool,getUserConfirmation:s.a.func,keyLength:s.a.number,children:s.a.node},t.a=d},function(e,t,n){"use strict";var r=n(151),o=n.n(r),i=n(1),a=n.n(i),u=n(12),s=n.n(u),c=n(27),l=n(92);function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.history=Object(c.createHashHistory)(r.props),f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){o()(!this.props.history,"<HashRouter> ignores the history prop. 
To use a custom history, use `import { Router }` instead of `import { HashRouter as Router }`.")},t.prototype.render=function(){return a.a.createElement(l.a,{history:this.history,children:this.props.children})},t}(a.a.Component);d.propTypes={basename:s.a.string,getUserConfirmation:s.a.func,hashType:s.a.oneOf(["hashbang","noslash","slash"]),children:s.a.node},t.a=d},function(e,t,n){"use strict";var r=n(136);t.a=r.a},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(153),s=n(152),c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},l="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};var f=function(e){var t=e.to,n=e.exact,r=e.strict,i=e.location,a=e.activeClassName,f=e.className,d=e.activeStyle,p=e.style,h=e.isActive,m=e["aria-current"],v=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["to","exact","strict","location","activeClassName","className","activeStyle","style","isActive","aria-current"]),y="object"===(void 0===t?"undefined":l(t))?t.pathname:t,g=y&&y.replace(/([.+*?=^!:${}()[\]|/\\])/g,"\\$1");return o.a.createElement(u.a,{path:g,exact:n,strict:r,location:i,children:function(e){var n=e.location,r=e.match,i=!!(h?h(r,n):r);return o.a.createElement(s.a,c({to:t,className:i?[f,a].filter(function(e){return e}).join(" "):f,style:i?c({},p,d):p,"aria-current":i&&m||null},v))}})};f.propTypes={to:s.a.propTypes.to,exact:a.a.bool,strict:a.a.bool,location:a.a.object,activeClassName:a.a.string,className:a.a.string,activeStyle:a.a.object,style:a.a.object,isActive:a.a.func,"aria-current":a.a.oneOf(["page","step","location","date","time","true"])},f.defaultProps={activeClassName:"active","aria-current":"page"},t.a=f},function(e,t,n){"use strict";var r=n(138);t.a=r.a},function(e,t,n){"use strict";var r=n(139);t.a=r.a},function(e,t,n){"use strict";var r=n(141);t.a=r.a},function(e,t,n){"use strict";var r=n(142);t.a=r.a},function(e,t,n){"use strict";var r=n(88);t.a=r.a},function(e,t,n){"use strict";var r=n(64);t.a=r.a},function(e,t,n){"use strict";var r=n(143);t.a=r.a},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(28)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(17),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.tag,n=e.sha256,o=e.verified,i=e.keyframes,a=void 0===i?{}:i,u=e.labels,s=e.summary,c=e.aspectRatio,p=void 0===c?1.777:c,h=e.showAll,m=0,v=(0,r.default)(a).map(function(e){return parseInt(e,10)}),y=v.sort(function(e,t){return e-t}).map(function(e){var t=a[e];return t.length||h?(m+=t.length,{frame:e,detections:t}):null}).filter(function(e){return!!e}),g=y.reduce(function(e,t){return t.detections.reduce(function(e,t){var n=t.idx;return n in e||(e[n]=[u[n],0]),e[n][1]+=1,e},e),e},{}),_=(0,r.default)(g).map(function(e){return g[e]}).sort(function(e,t){return t[1]-e[1]});return s?l.default.createElement("div",null,l.default.createElement("h3",null,t," 
Detections"),l.default.createElement(d.TableTuples,{list:_})):l.default.createElement("div",null,l.default.createElement("h2",null,t),l.default.createElement("h3",null,"Detections"),l.default.createElement(d.TableTuples,{list:_}),l.default.createElement("h3",null,"Frames"),l.default.createElement("ul",{className:"meta"},l.default.createElement("li",null,"Displaying ",y.length," / ",(0,f.courtesyS)(v.length,"frame")),l.default.createElement("li",null,(0,f.courtesyS)(m,"detection")," found")),l.default.createElement("div",{className:"thumbnails"},y.map(function(e){var t=e.frame,r=e.detections;return l.default.createElement(d.Keyframe,{key:t,sha256:n,frame:t,verified:o,size:"th",showFrame:!0,showTimestamp:!0,aspectRatio:p,detectionList:[{labels:u,detections:r}]},l.default.createElement(d.DetectionList,{labels:u,detections:r,width:160,height:90}))})))}}]),t}(c.Component);t.default=h},function(e,t,n){n(332),e.exports=n(10).Object.keys},function(e,t,n){var r=n(41),o=n(45);n(124)("keys",function(){return function(e){return o(r(e))}})},function(e,t,n){var r=n(10),o=r.JSON||(r.JSON={stringify:JSON.stringify});e.exports=function(e){return o.stringify.apply(o,arguments)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.detections,n=e.width,i=e.height;return t.map(function(e,t){var a=e.rect;return a&&r.default.createElement("div",{className:"rect",key:t,style:{left:(0,o.px)(a[0],n),top:(0,o.px)(a[1],i),width:(0,o.px)(a[2]-a[0],n),height:(0,o.px)(a[3]-a[1],i)}})})};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(17)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.detections,n=e.labels,o=e.tag,i=e.showEmpty;return r.default.createElement("span",{className:"detectionList"},o&&r.default.createElement("h3",null,o),!t.length&&i&&r.default.createElement("label",null,r.default.createElement("small",null,"No detections")),t.map(function(e,t){var o=e.idx,i=e.score;e.rect;return r.default.createElement("label",{key:t},r.default.createElement("small",{className:"title"},(n[o]||"Unknown").replace(/_/," ")),r.default.createElement("small",null,i.toFixed(2)))}))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){return r.default.createElement("footer",null)};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1));n(16),n(2)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){return r.default.createElement("div",{className:"loaderWrapper"},r.default.createElement("div",{className:"loader"}))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=d(n(5)),o=d(n(6)),i=d(n(7)),a=d(n(8)),u=d(n(9)),s=n(1),c=d(s),l=n(16),f=n(2);function d(e){return e&&e.__esModule?e:{default:e}}var p=function(e){function t(){return(0,o.default)(this,t),(0,a.default)(this,(t.__proto__||(0,r.default)(t)).apply(this,arguments))}return(0,u.default)(t,e),(0,i.default)(t,[{key:"render",value:function(){var e=this.props.hash;return e?c.default.createElement("div",{className:"sidebar"},c.default.createElement("h4",null,"Media"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/summary/"},"Summary"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/mediaRecord/"},"Media 
Record"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/mediaInfo/"},"Media Info"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/sugarcube/"},"Sugarcube"),c.default.createElement("h4",null,"Keyframes"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/keyframe/"},"Keyframe"),c.default.createElement("h4",null,"Detectors"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/places365/"},"Places 365"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/coco/"},"Coco")):c.default.createElement("div",{className:"sidebar"})}}]),t}(s.Component);t.default=(0,f.connect)(function(e){return{hash:e.metadata.hash}})(p)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=a(n(4)),o=a(n(1)),i=n(2);function a(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.connect)(function(e){return{app:e.metadata}})(function(e){var t=e.app,n=e.tag,i=e.View,a=t[n];return a?"loading"===a?o.default.createElement("div",{className:"tableObject loading"},n,": Loading"):a.err?o.default.createElement("div",{className:"tableObject error"},n," Error: ",a.err):o.default.createElement(i,(0,r.default)({data:a},e)):null})},function(e,t,n){e.exports={default:n(341),__esModule:!0}},function(e,t,n){n(342),e.exports=n(10).Object.assign},function(e,t,n){var r=n(18);r(r.S+r.F,"Object",{assign:n(343)})},function(e,t,n){"use strict";var r=n(45),o=n(85),i=n(59),a=n(41),u=n(130),s=Object.assign;e.exports=!s||n(35)(function(){var e={},t={},n=Symbol(),r="abcdefghijklmnopqrst";return e[n]=7,r.split("").forEach(function(e){t[e]=e}),7!=s({},e)[n]||Object.keys(s({},t)).join("")!=r})?function(e,t){for(var n=a(e),s=arguments.length,c=1,l=o.f,f=i.f;s>c;)for(var d,p=u(arguments[c++]),h=l?r(p).concat(l(p)):r(p),m=h.length,v=0;m>v;)f.call(p,d=h[v++])&&(n[d]=p[d]);return n}:s},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.verified,n=e.sha256,c=e.frame,l=e.score,f=e.isSaved,d=e.fps,p=void 0===d?25:d,h=e.size,m=void 0===h?"th":h,v=e.className,y=e.showHash,g=e.showFrame,_=e.showTimestamp,b=e.showScore,w=e.showSearchButton,x=e.showSaveButton,E=e.to,O=e.children,S=e.detectionList,T=void 0===S?[]:S,k=e.aspectRatio,R=void 0===k?1.777:k,j=e.onClick,P=e.reviewActions;if(!n)return null;var C=i.widths[m],M=Math.round(C/R);return r.default.createElement("div",{className:(v||"keyframe")+(f?" 
isSaved":"")},r.default.createElement("div",{className:"thumbnail"},r.default.createElement(s,{to:E||(0,i.keyframeUri)(n,c),onClick:j},r.default.createElement("img",{alt:"Frame #"+c,src:(0,i.imageUrl)(t,n,c,m),width:C,height:M,onClick:j}),T.map(function(e,t){var n=e.labels,o=e.detections;return r.default.createElement(a.DetectionBoxes,{key:t,labels:n,detections:o,width:C,height:M})})),P&&(w||x)&&r.default.createElement("label",{className:"searchButtons"},w&&r.default.createElement(o.Link,{to:u.publicUrl.searchByVerifiedFrame(t,n,c),className:"btn"},"Search"),x&&(f?r.default.createElement("button",{onClick:function(){return P.unsave({hash:n,frame:c,verified:t})},className:"btn btn-primary saved"},"Saved"):r.default.createElement("button",{onClick:function(){return P.save({hash:n,frame:c,verified:t})},className:"btn btn save"},"Save")))),(y||g||_||b)&&r.default.createElement("label",null,y&&r.default.createElement("small",null,r.default.createElement(o.Link,{to:u.publicUrl.browse(n)},r.default.createElement("span",{title:n,className:"sha256 "+(0,i.verify)(t)},"▶ ",n.substr(0,6)))),g&&r.default.createElement("small",null,r.default.createElement("span",null,"Frame #",c)),_&&r.default.createElement("small",null,(0,i.timestamp)(c,p)),b&&!!l&&r.default.createElement("small",null,l)),O)};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(16),i=n(17),a=n(11),u=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21));var s=function(e){return e.onClick?e.children:e.to.match(/^http/)?r.default.createElement("a",{href:e.to,target:"_blank",rel:"noopener noreferrer"},e.children):r.default.createElement(o.Link,e)}},function(e,t,n){"use strict";function r(e){return function(t){var n=t.dispatch,r=t.getState;return function(t){return function(o){return"function"==typeof o?o(n,r,e):t(o)}}}}Object.defineProperty(t,"__esModule",{value:!0});var o=r();o.withExtraArgument=r,t.default=o},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=a(n(65)),o=a(n(4));t.default=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:u,t=arguments[1];switch(t.type){case i.metadata.set_hash:return e=(0,o.default)({},e,{hash:t.hash});case i.metadata.loading:return(0,o.default)({},e,(0,r.default)({},t.tag,"loading"));case i.metadata.loaded:return(0,o.default)({},e,(0,r.default)({},t.tag,t.data));case i.metadata.loaded_many:return t.data.reduce(function(e,t){return e[t.name]=t.data||"error",e},(0,o.default)({},e,(0,r.default)({},t.tag,"loaded")));case i.metadata.error:return(0,o.default)({},e,(0,r.default)({},t.tag,t.err));default:return e}};var i=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39));function a(e){return e&&e.__esModule?e:{default:e}}var u={}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=s(n(348)),o=s(n(65)),i=s(n(4));t.default=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c(),t=arguments[1];switch(t.type){case a.search.loading:return"query"===t.tag&&t.offset?(0,i.default)({},e,{query:(0,i.default)({},e.query,{loadingMore:!0})}):(0,i.default)({},e,(0,o.default)({},t.tag,l[t.tag]||l.loading));case 
a.search.loaded:return"query"===t.tag&&t.offset?(0,i.default)({},e,{query:{query:t.data.query,results:[].concat((0,r.default)(e.query.results),(0,r.default)(t.data.results)),loadingMore:!1}}):(0,i.default)({},e,(0,o.default)({},t.tag,t.data));case a.search.error:return(0,i.default)({},e,(0,o.default)({},t.tag,{error:t.err}));case a.search.panic:return(0,i.default)({},c());case a.search.update_options:return u.default.setAll(t.opt),(0,i.default)({},e,{options:(0,i.default)({},t.opt)});default:return e}};var a=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39)),u=s(n(158));function s(e){return e&&e.__esModule?e:{default:e}}var c=function(){return{query:{reset:!0},browse:{reset:!0},options:{thumbnailSize:(0,u.default)("thumbnailSize")||"th",perPage:parseInt((0,u.default)("perPage"),10)||50,groupByHash:(0,u.default)("groupByHash")}}},l={query:{query:{loading:!0},results:[]},loading:{loading:!0}}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(154));t.default=function(e){if(Array.isArray(e)){for(var t=0,n=Array(e.length);t<e.length;t++)n[t]=e[t];return n}return(0,r.default)(e)}},function(e,t,n){n(44),n(350),e.exports=n(10).Array.from},function(e,t,n){"use strict";var r=n(34),o=n(18),i=n(41),a=n(155),u=n(156),s=n(81),c=n(351),l=n(95);o(o.S+o.F*!n(157)(function(e){Array.from(e)}),"Array",{from:function(e){var t,n,o,f,d=i(e),p="function"==typeof this?this:Array,h=arguments.length,m=h>1?arguments[1]:void 0,v=void 0!==m,y=0,g=l(d);if(v&&(m=r(m,h>2?arguments[2]:void 0,2)),void 0==g||p==Array&&u(g))for(n=new p(t=s(d.length));t>y;y++)c(n,y,v?m(d[y],y):d[y]);else for(f=g.call(d),n=new p;!(o=f.next()).done;y++)c(n,y,v?a(f,m,[o.value,y],!0):o.value);return n.length=y,n}})},function(e,t,n){"use strict";var r=n(22),o=n(43);e.exports=function(e,t,n){t in e?r.f(e,t,o(0,n)):e[t]=n}},function(e,t){!function(t,n){var r={version:"2.7.0",areas:{},apis:{},inherit:function(e,t){for(var n in e)t.hasOwnProperty(n)||(t[n]=e[n]);return t},stringify:function(e){return void 0===e||"function"==typeof e?e+"":JSON.stringify(e)},parse:function(e){try{return JSON.parse(e)}catch(t){return e}},fn:function(e,t){for(var n in r.storeAPI[e]=t,r.apis)r.apis[n][e]=t},get:function(e,t){return e.getItem(t)},set:function(e,t,n){e.setItem(t,n)},remove:function(e,t){e.removeItem(t)},key:function(e,t){return e.key(t)},length:function(e){return e.length},clear:function(e){e.clear()},Store:function(e,t,n){var o=r.inherit(r.storeAPI,function(e,t,n){return 0===arguments.length?o.getAll():"function"==typeof t?o.transact(e,t,n):void 0!==t?o.set(e,t,n):"string"==typeof e||"number"==typeof e?o.get(e):e?o.setAll(e,t):o.clear()});o._id=e;try{t.setItem("_safariPrivate_","sucks"),o._area=t,t.removeItem("_safariPrivate_")}catch(e){}return o._area||(o._area=r.inherit(r.storageAPI,{items:{},name:"fake"})),o._ns=n||"",r.areas[e]||(r.areas[e]=o._area),r.apis[o._ns+o._id]||(r.apis[o._ns+o._id]=o),o},storeAPI:{area:function(e,t){var n=this[e];return n&&n.area||(n=r.Store(e,t,this._ns),this[e]||(this[e]=n)),n},namespace:function(e,t){if(!e)return this._ns?this._ns.substring(0,this._ns.length-1):"";var n=e,o=this[n];return 
o&&o.namespace||(o=r.Store(this._id,this._area,this._ns+n+"."),this[n]||(this[n]=o),t||o.area("session",r.areas.session)),o},isFake:function(){return"fake"===this._area.name},toString:function(){return"store"+(this._ns?"."+this.namespace():"")+"["+this._id+"]"},has:function(e){return this._area.has?this._area.has(this._in(e)):!!(this._in(e)in this._area)},size:function(){return this.keys().length},each:function(e,t){for(var n=0,o=r.length(this._area);n<o;n++){var i=this._out(r.key(this._area,n));if(void 0!==i&&!1===e.call(this,i,t||this.get(i)))break;o>r.length(this._area)&&(o--,n--)}return t||this},keys:function(e){return this.each(function(e,t){t.push(e)},e||[])},get:function(e,t){var n=r.get(this._area,this._in(e));return null!==n?r.parse(n):t||n},getAll:function(e){return this.each(function(e,t){t[e]=this.get(e)},e||{})},transact:function(e,t,n){var r=this.get(e,n),o=t(r);return this.set(e,void 0===o?r:o),this},set:function(e,t,n){var o=this.get(e);return null!=o&&!1===n?t:r.set(this._area,this._in(e),r.stringify(t),n)||o},setAll:function(e,t){var n,r;for(var o in e)r=e[o],this.set(o,r,t)!==r&&(n=!0);return n},add:function(e,t){var n=this.get(e);if(n instanceof Array)t=n.concat(t);else if(null!==n){var o=typeof n;if(o===typeof t&&"object"===o){for(var i in t)n[i]=t[i];t=n}else t=n+t}return r.set(this._area,this._in(e),r.stringify(t)),t},remove:function(e){var t=this.get(e);return r.remove(this._area,this._in(e)),t},clear:function(){return this._ns?this.each(function(e){r.remove(this._area,this._in(e))},1):r.clear(this._area),this},clearAll:function(){var e=this._area;for(var t in r.areas)r.areas.hasOwnProperty(t)&&(this._area=r.areas[t],this.clear());return this._area=e,this},_in:function(e){return"string"!=typeof e&&(e=r.stringify(e)),this._ns?this._ns+e:e},_out:function(e){return this._ns?e&&0===e.indexOf(this._ns)?e.substring(this._ns.length):void 0:e}},storageAPI:{length:0,has:function(e){return this.items.hasOwnProperty(e)},key:function(e){var t=0;for(var n in this.items)if(this.has(n)&&e===t++)return n},setItem:function(e,t){this.has(e)||this.length++,this.items[e]=t},removeItem:function(e){this.has(e)&&(delete this.items[e],this.length--)},getItem:function(e){return this.has(e)?this.items[e]:null},clear:function(){for(var e in this.items)this.removeItem(e)},toString:function(){return this.length+" items in "+this.name+"Storage"}}},o=r.Store("local",function(){try{return localStorage}catch(e){}}());o.local=o,o._=r,o.area("session",function(){try{return sessionStorage}catch(e){}}()),"function"==typeof n&&void 0!==n.amd?n("store2",[],function(){return o}):void 0!==e&&e.exports?e.exports=o:(t.store&&(r.conflict=t.store),t.store=o)}(this,this.define)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=s(n(65)),o=s(n(4));t.default=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:l,t=arguments[1],n=t.saved;switch(t.type){case i.review.save:case i.review.unsave:case i.review.refresh:return a.setSaved(n),(0,o.default)({},e,{count:a.getSavedCount(n),saved:(0,o.default)({},n)});case i.review.clear:return a.setSaved({}),(0,o.default)({},e,{count:0,saved:{}});case i.review.dedupe:return(0,o.default)({},e,{deduped:t.deduped});case i.review.loading:return(0,o.default)({},e,(0,r.default)({},t.tag,{loading:!0}));case i.review.loaded:return(0,o.default)({},e,(0,r.default)({},t.tag,t.data||{}));case i.review.error:return(0,o.default)({},e,(0,r.default)({},t.tag,{error:t.err}));default:return e}};var i=u(n(39)),a=u(n(159));function 
u(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function s(e){return e&&e.__esModule?e:{default:e}}var c=a.getSaved(),l={saved:c,count:a.getSavedCount(c),deduped:!1,dedupe:{count:0},create:{}}},function(e,t,n){"use strict";const r=n(355),o=n(356);function i(e,t){return t.encode?t.strict?r(e):encodeURIComponent(e):e}function a(e,t){return t.decode?o(e):e}function u(e){const t=e.indexOf("?");return-1===t?"":e.slice(t+1)}function s(e,t){const n=function(e){let t;switch(e.arrayFormat){case"index":return(e,n,r)=>{t=/\[(\d*)\]$/.exec(e),e=e.replace(/\[\d*\]$/,""),t?(void 0===r[e]&&(r[e]={}),r[e][t[1]]=n):r[e]=n};case"bracket":return(e,n,r)=>{t=/(\[\])$/.exec(e),e=e.replace(/\[\]$/,""),t?void 0!==r[e]?r[e]=[].concat(r[e],n):r[e]=[n]:r[e]=n};default:return(e,t,n)=>{void 0!==n[e]?n[e]=[].concat(n[e],t):n[e]=t}}}(t=Object.assign({decode:!0,arrayFormat:"none"},t)),r=Object.create(null);if("string"!=typeof e)return r;if(!(e=e.trim().replace(/^[?#&]/,"")))return r;for(const o of e.split("&")){let[e,i]=o.replace(/\+/g," ").split("=");i=void 0===i?null:a(i,t),n(a(e,t),i,r)}return Object.keys(r).sort().reduce((e,t)=>{const n=r[t];return Boolean(n)&&"object"==typeof n&&!Array.isArray(n)?e[t]=function e(t){return Array.isArray(t)?t.sort():"object"==typeof t?e(Object.keys(t)).sort((e,t)=>Number(e)-Number(t)).map(e=>t[e]):t}(n):e[t]=n,e},Object.create(null))}t.extract=u,t.parse=s,t.stringify=((e,t)=>{if(!e)return"";const n=function(e){switch(e.arrayFormat){case"index":return(t,n,r)=>null===n?[i(t,e),"[",r,"]"].join(""):[i(t,e),"[",i(r,e),"]=",i(n,e)].join("");case"bracket":return(t,n)=>null===n?[i(t,e),"[]"].join(""):[i(t,e),"[]=",i(n,e)].join("");default:return(t,n)=>null===n?i(t,e):[i(t,e),"=",i(n,e)].join("")}}(t=Object.assign({encode:!0,strict:!0,arrayFormat:"none"},t)),r=Object.keys(e);return!1!==t.sort&&r.sort(t.sort),r.map(r=>{const o=e[r];if(void 0===o)return"";if(null===o)return i(r,t);if(Array.isArray(o)){const e=[];for(const t of o.slice())void 0!==t&&e.push(n(r,t,e.length));return e.join("&")}return i(r,t)+"="+i(o,t)}).filter(e=>e.length>0).join("&")}),t.parseUrl=((e,t)=>{const n=e.indexOf("#");return-1!==n&&(e=e.slice(0,n)),{url:e.split("?")[0]||"",query:s(u(e),t)}})},function(e,t,n){"use strict";e.exports=(e=>encodeURIComponent(e).replace(/[!'()*]/g,e=>`%${e.charCodeAt(0).toString(16).toUpperCase()}`))},function(e,t,n){"use strict";var r=new RegExp("%[a-f0-9]{2}","gi"),o=new RegExp("(%[a-f0-9]{2})+","gi");function i(e,t){try{return decodeURIComponent(e.join(""))}catch(e){}if(1===e.length)return e;t=t||1;var n=e.slice(0,t),r=e.slice(t);return Array.prototype.concat.call([],i(n),i(r))}function a(e){try{return decodeURIComponent(e)}catch(o){for(var t=e.match(r),n=1;n<t.length;n++)t=(e=i(t,n).join("")).match(r);return e}}e.exports=function(e){if("string"!=typeof e)throw new TypeError("Expected `encodedURI` to be of type `string`, got `"+typeof e+"`");try{return e=e.replace(/\+/g," "),decodeURIComponent(e)}catch(t){return function(e){for(var t={"%FE%FF":"��","%FF%FE":"��"},n=o.exec(e);n;){try{t[n[0]]=decodeURIComponent(n[0])}catch(e){var r=a(n[0]);r!==n[0]&&(t[n[0]]=r)}n=o.exec(e)}t["%C2"]="�";for(var i=Object.keys(t),u=0;u<i.length;u++){var s=i[u];e=e.replace(new RegExp(s,"g"),t[s])}return e}(e)}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=m(n(358)),o=m(n(160)),i=m(n(28)),a=m(n(4)),u=m(n(1)),s=n(16),c=n(15),l=n(2),f=n(11),d=h(n(97)),p=h(n(21));function 
h(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function m(e){return e&&e.__esModule?e:{default:e}}function v(e){var t=e.saved,n=void 0===t?{}:t,o=e.frames,i=e.options,c=e.review,l=(e.search,e.minDistance),d=e.label,h=e.count,m=(0,r.default)(e,["saved","frames","options","review","search","minDistance","label","count"]);return o?u.default.createElement("div",{className:d?"keyframes keyframeGroup":"keyframes"},d&&u.default.createElement("h4",null,u.default.createElement(s.Link,{to:p.publicUrl.browse(d)},d)," (",h,")"),o.map(function(e){var t=e.hash,r=e.frame,o=e.verified,s=e.distance;return u.default.createElement(f.Keyframe,(0,a.default)({key:t+"_"+r,sha256:t,frame:r,score:100-Math.round(s-l)+"%",verified:o,isSaved:!!n[t]&&!!n[t].frames&&!!n[t].frames[parseInt(r,10)],size:i.thumbnailSize,onClick:function(){return c.toggleSaved({verified:o,hash:t,frame:r})},reviewActions:c},m))})):null}t.default=(0,l.connect)(function(e){return{saved:e.review.saved,options:e.search.options}},function(e){return{review:(0,c.bindActionCreators)((0,a.default)({},d),e),search:(0,c.bindActionCreators)((0,a.default)({},p),e)}})(function(e){var t=e.frames,n=e.groupByHash,r=0;if(t&&t.length&&(r=t[0].distance||0),!n)return u.default.createElement(v,(0,a.default)({minDistance:r},e));var s=t.reduce(function(e,t){return e[t.hash]?e[t.hash].push(t):e[t.hash]=[t],e},{});return(0,i.default)(s).map(function(e){return[s[e].length,e]}).sort(function(e,t){return t[0]-e[0]}).map(function(t){var n=(0,o.default)(t,2),i=n[0],c=n[1];return u.default.createElement(v,(0,a.default)({},e,{count:i,key:c,minDistance:r,frames:s[c],label:c}))})})},function(e,t,n){"use strict";t.__esModule=!0,t.default=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}},function(e,t,n){e.exports={default:n(360),__esModule:!0}},function(e,t,n){n(58),n(44),e.exports=n(361)},function(e,t,n){var r=n(96),o=n(14)("iterator"),i=n(36);e.exports=n(10).isIterable=function(e){var t=Object(e);return void 0!==t[o]||"@@iterator"in t||i.hasOwnProperty(r(t))}},function(e,t,n){e.exports={default:n(363),__esModule:!0}},function(e,t,n){n(58),n(44),e.exports=n(364)},function(e,t,n){var r=n(20),o=n(95);e.exports=n(10).getIterator=function(e){var t=o(e);if("function"!=typeof t)throw TypeError(e+" is not iterable!");return r(t.call(e))}},function(e,t,n){e.exports={default:n(366),__esModule:!0}},function(e,t,n){n(134),n(44),n(58),n(367),n(375),n(376),e.exports=n(10).Promise},function(e,t,n){"use strict";var r,o,i,a,u=n(42),s=n(13),c=n(34),l=n(96),f=n(18),d=n(23),p=n(56),h=n(368),m=n(369),v=n(161),y=n(162).set,g=n(371)(),_=n(98),b=n(163),w=n(372),x=n(164),E=s.TypeError,O=s.process,S=O&&O.versions,T=S&&S.v8||"",k=s.Promise,R="process"==l(O),j=function(){},P=o=_.f,C=!!function(){try{var e=k.resolve(1),t=(e.constructor={})[n(14)("species")]=function(e){e(j,j)};return(R||"function"==typeof PromiseRejectionEvent)&&e.then(j)instanceof t&&0!==T.indexOf("6.6")&&-1===w.indexOf("Chrome/66")}catch(e){}}(),M=function(e){var t;return!(!d(e)||"function"!=typeof(t=e.then))&&t},I=function(e,t){if(!e._n){e._n=!0;var n=e._c;g(function(){for(var r=e._v,o=1==e._s,i=0,a=function(t){var n,i,a,u=o?t.ok:t.fail,s=t.resolve,c=t.reject,l=t.domain;try{u?(o||(2==e._h&&N(e),e._h=1),!0===u?n=r:(l&&l.enter(),n=u(r),l&&(l.exit(),a=!0)),n===t.promise?c(E("Promise-chain 
cycle")):(i=M(n))?i.call(n,s,c):s(n)):c(r)}catch(e){l&&!a&&l.exit(),c(e)}};n.length>i;)a(n[i++]);e._c=[],e._n=!1,t&&!e._h&&A(e)})}},A=function(e){y.call(s,function(){var t,n,r,o=e._v,i=D(e);if(i&&(t=b(function(){R?O.emit("unhandledRejection",o,e):(n=s.onunhandledrejection)?n({promise:e,reason:o}):(r=s.console)&&r.error&&r.error("Unhandled promise rejection",o)}),e._h=R||D(e)?2:1),e._a=void 0,i&&t.e)throw t.v})},D=function(e){return 1!==e._h&&0===(e._a||e._c).length},N=function(e){y.call(s,function(){var t;R?O.emit("rejectionHandled",e):(t=s.onrejectionhandled)&&t({promise:e,reason:e._v})})},L=function(e){var t=this;t._d||(t._d=!0,(t=t._w||t)._v=e,t._s=2,t._a||(t._a=t._c.slice()),I(t,!0))},U=function(e){var t,n=this;if(!n._d){n._d=!0,n=n._w||n;try{if(n===e)throw E("Promise can't be resolved itself");(t=M(e))?g(function(){var r={_w:n,_d:!1};try{t.call(e,c(U,r,1),c(L,r,1))}catch(e){L.call(r,e)}}):(n._v=e,n._s=1,I(n,!1))}catch(e){L.call({_w:n,_d:!1},e)}}};C||(k=function(e){h(this,k,"Promise","_h"),p(e),r.call(this);try{e(c(U,this,1),c(L,this,1))}catch(e){L.call(this,e)}},(r=function(e){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=n(373)(k.prototype,{then:function(e,t){var n=P(v(this,k));return n.ok="function"!=typeof e||e,n.fail="function"==typeof t&&t,n.domain=R?O.domain:void 0,this._c.push(n),this._a&&this._a.push(n),this._s&&I(this,!1),n.promise},catch:function(e){return this.then(void 0,e)}}),i=function(){var e=new r;this.promise=e,this.resolve=c(U,e,1),this.reject=c(L,e,1)},_.f=P=function(e){return e===k||e===a?new i(e):o(e)}),f(f.G+f.W+f.F*!C,{Promise:k}),n(57)(k,"Promise"),n(374)("Promise"),a=n(10).Promise,f(f.S+f.F*!C,"Promise",{reject:function(e){var t=P(this);return(0,t.reject)(e),t.promise}}),f(f.S+f.F*(u||!C),"Promise",{resolve:function(e){return x(u&&this===a?k:this,e)}}),f(f.S+f.F*!(C&&n(157)(function(e){k.all(e).catch(j)})),"Promise",{all:function(e){var t=this,n=P(t),r=n.resolve,o=n.reject,i=b(function(){var n=[],i=0,a=1;m(e,!1,function(e){var u=i++,s=!1;n.push(void 0),a++,t.resolve(e).then(function(e){s||(s=!0,n[u]=e,--a||r(n))},o)}),--a||r(n)});return i.e&&o(i.v),n.promise},race:function(e){var t=this,n=P(t),r=n.reject,o=b(function(){m(e,!1,function(e){t.resolve(e).then(n.resolve,r)})});return o.e&&r(o.v),n.promise}})},function(e,t){e.exports=function(e,t,n,r){if(!(e instanceof t)||void 0!==r&&r in e)throw TypeError(n+": incorrect invocation!");return e}},function(e,t,n){var r=n(34),o=n(155),i=n(156),a=n(20),u=n(81),s=n(95),c={},l={};(t=e.exports=function(e,t,n,f,d){var p,h,m,v,y=d?function(){return e}:s(e),g=r(n,f,t?2:1),_=0;if("function"!=typeof y)throw TypeError(e+" is not iterable!");if(i(y)){for(p=u(e.length);p>_;_++)if((v=t?g(a(h=e[_])[0],h[1]):g(e[_]))===c||v===l)return v}else for(m=y.call(e);!(h=m.next()).done;)if((v=o(m,g,h.value,t))===c||v===l)return v}).BREAK=c,t.RETURN=l},function(e,t){e.exports=function(e,t,n){var r=void 0===n;switch(t.length){case 0:return r?e():e.call(n);case 1:return r?e(t[0]):e.call(n,t[0]);case 2:return r?e(t[0],t[1]):e.call(n,t[0],t[1]);case 3:return r?e(t[0],t[1],t[2]):e.call(n,t[0],t[1],t[2]);case 4:return r?e(t[0],t[1],t[2],t[3]):e.call(n,t[0],t[1],t[2],t[3])}return e.apply(n,t)}},function(e,t,n){var r=n(13),o=n(162).set,i=r.MutationObserver||r.WebKitMutationObserver,a=r.process,u=r.Promise,s="process"==n(46)(a);e.exports=function(){var e,t,n,c=function(){var r,o;for(s&&(r=a.domain)&&r.exit();e;){o=e.fn,e=e.next;try{o()}catch(r){throw e?n():t=void 0,r}}t=void 
0,r&&r.enter()};if(s)n=function(){a.nextTick(c)};else if(!i||r.navigator&&r.navigator.standalone)if(u&&u.resolve){var l=u.resolve(void 0);n=function(){l.then(c)}}else n=function(){o.call(r,c)};else{var f=!0,d=document.createTextNode("");new i(c).observe(d,{characterData:!0}),n=function(){d.data=f=!f}}return function(r){var o={fn:r,next:void 0};t&&(t.next=o),e||(e=o,n()),t=o}}},function(e,t,n){var r=n(13).navigator;e.exports=r&&r.userAgent||""},function(e,t,n){var r=n(26);e.exports=function(e,t,n){for(var o in t)n&&e[o]?e[o]=t[o]:r(e,o,t[o]);return e}},function(e,t,n){"use strict";var r=n(13),o=n(10),i=n(22),a=n(24),u=n(14)("species");e.exports=function(e){var t="function"==typeof o[e]?o[e]:r[e];a&&t&&!t[u]&&i.f(t,u,{configurable:!0,get:function(){return this}})}},function(e,t,n){"use strict";var r=n(18),o=n(10),i=n(13),a=n(161),u=n(164);r(r.P+r.R,"Promise",{finally:function(e){var t=a(this,o.Promise||i.Promise),n="function"==typeof e;return this.then(n?function(n){return u(t,e()).then(function(){return n})}:e,n?function(n){return u(t,e()).then(function(){throw n})}:e)}})},function(e,t,n){"use strict";var r=n(18),o=n(98),i=n(163);r(r.S,"Promise",{try:function(e){var t=o.f(this),n=i(e);return(n.e?t.reject:t.resolve)(n.v),t.promise}})},function(e,t,n){var r=n(0);e.exports=function(e,t,n,o){var i=r(e).getTime(),a=r(t).getTime(),u=r(n).getTime(),s=r(o).getTime();if(i>a||u>s)throw new Error("The start of the range cannot be after the end of the range");return i<s&&u<a}},function(e,t,n){var r=n(0);e.exports=function(e,t){if(!(t instanceof Array))throw new TypeError(toString.call(t)+" is not an instance of Array");var n,o,i=r(e).getTime();return t.forEach(function(e,t){var a=r(e),u=Math.abs(i-a.getTime());(void 0===n||u<o)&&(n=t,o=u)}),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){if(!(t instanceof Array))throw new TypeError(toString.call(t)+" is not an instance of Array");var n,o,i=r(e).getTime();return t.forEach(function(e){var t=r(e),a=Math.abs(i-t.getTime());(void 0===n||a<o)&&(n=t,o=a)}),n}},function(e,t,n){var r=n(30),o=6e4,i=6048e5;e.exports=function(e,t){var n=r(e),a=r(t),u=n.getTime()-n.getTimezoneOffset()*o,s=a.getTime()-a.getTimezoneOffset()*o;return Math.round((u-s)/i)}},function(e,t,n){var r=n(175),o=n(0);e.exports=function(e,t){var n=o(e),i=o(t);return 4*(n.getFullYear()-i.getFullYear())+(r(n)-r(i))}},function(e,t,n){var r=n(66),o=6e4,i=6048e5;e.exports=function(e,t,n){var a=r(e,n),u=r(t,n),s=a.getTime()-a.getTimezoneOffset()*o,c=u.getTime()-u.getTimezoneOffset()*o;return Math.round((s-c)/i)}},function(e,t,n){var r=n(69),o=36e5;e.exports=function(e,t){var n=r(e,t)/o;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(0),o=n(173),i=n(53),a=n(178);e.exports=function(e,t){var n=r(e),u=r(t),s=i(n,u),c=Math.abs(o(n,u));return n=a(n,s*c),s*(c-(i(n,u)===-s))}},function(e,t,n){var r=n(69),o=6e4;e.exports=function(e,t){var n=r(e,t)/o;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(103);e.exports=function(e,t){var n=r(e,t)/3;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(177);e.exports=function(e,t){var n=r(e,t)/7;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(0),o=n(176),i=n(53);e.exports=function(e,t){var n=r(e),a=r(t),u=i(n,a),s=Math.abs(o(n,a));return n.setFullYear(n.getFullYear()-u*s),u*(s-(i(n,a)===-u))}},function(e,t){e.exports=function(){var e={lessThanXSeconds:{one:"less than a second",other:"less than {{count}} seconds"},xSeconds:{one:"1 second",other:"{{count}} seconds"},halfAMinute:"half a 
minute",lessThanXMinutes:{one:"less than a minute",other:"less than {{count}} minutes"},xMinutes:{one:"1 minute",other:"{{count}} minutes"},aboutXHours:{one:"about 1 hour",other:"about {{count}} hours"},xHours:{one:"1 hour",other:"{{count}} hours"},xDays:{one:"1 day",other:"{{count}} days"},aboutXMonths:{one:"about 1 month",other:"about {{count}} months"},xMonths:{one:"1 month",other:"{{count}} months"},aboutXYears:{one:"about 1 year",other:"about {{count}} years"},xYears:{one:"1 year",other:"{{count}} years"},overXYears:{one:"over 1 year",other:"over {{count}} years"},almostXYears:{one:"almost 1 year",other:"almost {{count}} years"}};return{localize:function(t,n,r){var o;return r=r||{},o="string"==typeof e[t]?e[t]:1===n?e[t].one:e[t].other.replace("{{count}}",n),r.addSuffix?r.comparison>0?"in "+o:o+" ago":o}}}},function(e,t,n){var r=n(391);e.exports=function(){var e=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],t=["January","February","March","April","May","June","July","August","September","October","November","December"],n=["Su","Mo","Tu","We","Th","Fr","Sa"],o=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],i=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],a=["AM","PM"],u=["am","pm"],s=["a.m.","p.m."],c={MMM:function(t){return e[t.getMonth()]},MMMM:function(e){return t[e.getMonth()]},dd:function(e){return n[e.getDay()]},ddd:function(e){return o[e.getDay()]},dddd:function(e){return i[e.getDay()]},A:function(e){return e.getHours()/12>=1?a[1]:a[0]},a:function(e){return e.getHours()/12>=1?u[1]:u[0]},aa:function(e){return e.getHours()/12>=1?s[1]:s[0]}};return["M","D","DDD","d","Q","W"].forEach(function(e){c[e+"o"]=function(t,n){return function(e){var t=e%100;if(t>20||t<10)switch(t%10){case 1:return e+"st";case 2:return e+"nd";case 3:return e+"rd"}return e+"th"}(n[e](t))}}),{formatters:c,formattingTokensRegExp:r(c)}}},function(e,t){var n=["M","MM","Q","D","DD","DDD","DDDD","d","E","W","WW","YY","YYYY","GG","GGGG","H","HH","h","hh","m","mm","s","ss","S","SS","SSS","Z","ZZ","X","x"];e.exports=function(e){var t=[];for(var r in e)e.hasOwnProperty(r)&&t.push(r);var o=n.concat(t).sort().reverse();return new RegExp("(\\[[^\\[]*\\])|(\\\\)?("+o.join("|")+"|.)","g")}},function(e,t,n){var r=n(102),o=n(0),i=n(104),a=n(105),u=1440,s=43200,c=525600;e.exports=function(e,t,n){var l=n||{},f=r(e,t),d=l.locale,p=a.distanceInWords.localize;d&&d.distanceInWords&&d.distanceInWords.localize&&(p=d.distanceInWords.localize);var h,m,v,y={addSuffix:Boolean(l.addSuffix),comparison:f};f>0?(h=o(e),m=o(t)):(h=o(t),m=o(e));var g=Math[l.partialMethod?String(l.partialMethod):"floor"],_=i(m,h),b=m.getTimezoneOffset()-h.getTimezoneOffset(),w=g(_/60)-b;if("s"===(v=l.unit?String(l.unit):w<1?"s":w<60?"m":w<u?"h":w<s?"d":w<c?"M":"Y"))return p("xSeconds",_,y);if("m"===v)return p("xMinutes",w,y);if("h"===v)return p("xHours",g(w/60),y);if("d"===v)return p("xDays",g(w/u),y);if("M"===v)return p("xMonths",g(w/s),y);if("Y"===v)return p("xYears",g(w/c),y);throw new Error("Unknown unit: "+v)}},function(e,t,n){var r=n(179);e.exports=function(e,t){return r(Date.now(),e,t)}},function(e,t,n){var r=n(0);e.exports=function(e,t,n){var o=r(e),i=r(t),a=void 0!==n?n:1,u=i.getTime();if(o.getTime()>u)throw new Error("The first date cannot be after the second date");var s=[],c=o;for(c.setHours(0,0,0,0);c.getTime()<=u;)s.push(r(c)),c.setDate(c.getDate()+a);return s}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMinutes(59,59,999),t}},function(e,t,n){var 
r=n(180);e.exports=function(e){return r(e,{weekStartsOn:1})}},function(e,t,n){var r=n(29),o=n(30);e.exports=function(e){var t=r(e),n=new Date(0);n.setFullYear(t+1,0,4),n.setHours(0,0,0,0);var i=o(n);return i.setMilliseconds(i.getMilliseconds()-1),i}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setSeconds(59,999),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth(),o=n-n%3+3;return t.setMonth(o,0),t.setHours(23,59,59,999),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMilliseconds(999),t}},function(e,t,n){var r=n(106);e.exports=function(){return r(new Date)}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r+1),o.setHours(23,59,59,999),o}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getFullYear();return t.setFullYear(n+1,0,0),t.setHours(23,59,59,999),t}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r-1),o.setHours(23,59,59,999),o}},function(e,t,n){var r=n(182),o=n(107),i=n(29),a=n(0),u=n(184),s=n(105);var c={M:function(e){return e.getMonth()+1},MM:function(e){return d(e.getMonth()+1,2)},Q:function(e){return Math.ceil((e.getMonth()+1)/3)},D:function(e){return e.getDate()},DD:function(e){return d(e.getDate(),2)},DDD:function(e){return r(e)},DDDD:function(e){return d(r(e),3)},d:function(e){return e.getDay()},E:function(e){return e.getDay()||7},W:function(e){return o(e)},WW:function(e){return d(o(e),2)},YY:function(e){return d(e.getFullYear(),4).substr(2)},YYYY:function(e){return d(e.getFullYear(),4)},GG:function(e){return String(i(e)).substr(2)},GGGG:function(e){return i(e)},H:function(e){return e.getHours()},HH:function(e){return d(e.getHours(),2)},h:function(e){var t=e.getHours();return 0===t?12:t>12?t%12:t},hh:function(e){return d(c.h(e),2)},m:function(e){return e.getMinutes()},mm:function(e){return d(e.getMinutes(),2)},s:function(e){return e.getSeconds()},ss:function(e){return d(e.getSeconds(),2)},S:function(e){return Math.floor(e.getMilliseconds()/100)},SS:function(e){return d(Math.floor(e.getMilliseconds()/10),2)},SSS:function(e){return d(e.getMilliseconds(),3)},Z:function(e){return f(e.getTimezoneOffset(),":")},ZZ:function(e){return f(e.getTimezoneOffset())},X:function(e){return Math.floor(e.getTime()/1e3)},x:function(e){return e.getTime()}};function l(e){return e.match(/\[[\s\S]/)?e.replace(/^\[|]$/g,""):e.replace(/\\/g,"")}function f(e,t){t=t||"";var n=e>0?"-":"+",r=Math.abs(e),o=r%60;return n+d(Math.floor(r/60),2)+t+d(o,2)}function d(e,t){for(var n=Math.abs(e).toString();n.length<t;)n="0"+n;return n}e.exports=function(e,t,n){var r=t?String(t):"YYYY-MM-DDTHH:mm:ss.SSSZ",o=(n||{}).locale,i=s.format.formatters,f=s.format.formattingTokensRegExp;o&&o.format&&o.format.formatters&&(i=o.format.formatters,o.format.formattingTokensRegExp&&(f=o.format.formattingTokensRegExp));var d=a(e);return u(d)?function(e,t,n){var r,o,i=e.match(n),a=i.length;for(r=0;r<a;r++)o=t[i[r]]||c[i[r]],i[r]=o||l(i[r]);return function(e){for(var t="",n=0;n<a;n++)i[n]instanceof Function?t+=i[n](e,c):t+=i[n];return t}}(r,i,f)(d):"Invalid Date"}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getDate()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getDay()}},function(e,t,n){var r=n(185);e.exports=function(e){return r(e)?366:365}},function(e,t,n){var r=n(0);e.exports=function(e){return 
r(e).getHours()}},function(e,t,n){var r=n(52),o=n(101),i=6048e5;e.exports=function(e){var t=r(e),n=r(o(t,60)).valueOf()-t.valueOf();return Math.round(n/i)}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getMilliseconds()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getMinutes()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getMonth()}},function(e,t,n){var r=n(0),o=864e5;e.exports=function(e,t,n,i){var a=r(e).getTime(),u=r(t).getTime(),s=r(n).getTime(),c=r(i).getTime();if(a>u||s>c)throw new Error("The start of the range cannot be after the end of the range");if(!(a<c&&s<u))return 0;var l=(c>u?u:c)-(s<a?a:s);return Math.ceil(l/o)}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getSeconds()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getFullYear()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()>o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()<o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 1===r(e).getDate()}},function(e,t,n){var r=n(0);e.exports=function(e){return 5===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getTime()>(new Date).getTime()}},function(e,t,n){var r=n(0),o=n(106),i=n(181);e.exports=function(e){var t=r(e);return o(t).getTime()===i(t).getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 1===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getTime()<(new Date).getTime()}},function(e,t,n){var r=n(31);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 6===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return 0===r(e).getDay()}},function(e,t,n){var r=n(187);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(189);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(190);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(191);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(193);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(194);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(196);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(108);e.exports=function(e,t){return r(new Date,e,t)}},function(e,t,n){var r=n(198);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(0);e.exports=function(e){return 4===r(e).getDay()}},function(e,t,n){var r=n(31);e.exports=function(e){return r(e).getTime()===r(new Date).getTime()}},function(e,t,n){var r=n(31);e.exports=function(e){var t=new Date;return t.setDate(t.getDate()+1),r(e).getTime()===r(t).getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 2===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return 3===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e).getDay();return 0===t||6===t}},function(e,t,n){var r=n(0);e.exports=function(e,t,n){var o=r(e).getTime(),i=r(t).getTime(),a=r(n).getTime();if(i>a)throw new Error("The start of the range cannot be after the end of the range");return o>=i&&o<=a}},function(e,t,n){var r=n(31);e.exports=function(e){var t=new Date;return 
t.setDate(t.getDate()-1),r(e).getTime()===r(t).getTime()}},function(e,t,n){var r=n(199);e.exports=function(e){return r(e,{weekStartsOn:1})}},function(e,t,n){var r=n(29),o=n(30);e.exports=function(e){var t=r(e),n=new Date(0);n.setFullYear(t+1,0,4),n.setHours(0,0,0,0);var i=o(n);return i.setDate(i.getDate()-1),i}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth(),o=n-n%3+3;return t.setMonth(o,0),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getFullYear();return t.setFullYear(n+1,0,0),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(0);e.exports=function(){var e=Array.prototype.slice.call(arguments).map(function(e){return r(e)}),t=Math.max.apply(null,e);return new Date(t)}},function(e,t,n){var r=n(0);e.exports=function(){var e=Array.prototype.slice.call(arguments).map(function(e){return r(e)}),t=Math.min.apply(null,e);return new Date(t)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setDate(o),n}},function(e,t,n){var r=n(0),o=n(50);e.exports=function(e,t,n){var i=n&&Number(n.weekStartsOn)||0,a=r(e),u=Number(t),s=a.getDay();return o(a,((u%7+7)%7<i?7:0)+u-s)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setMonth(0),n.setDate(o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setHours(o),n}},function(e,t,n){var r=n(0),o=n(50),i=n(186);e.exports=function(e,t){var n=r(e),a=Number(t),u=i(n);return o(n,a-u)}},function(e,t,n){var r=n(0),o=n(107);e.exports=function(e,t){var n=r(e),i=Number(t),a=o(n)-i;return n.setDate(n.getDate()-7*a),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setMilliseconds(o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setMinutes(o),n}},function(e,t,n){var r=n(0),o=n(200);e.exports=function(e,t){var n=r(e),i=Number(t)-(Math.floor(n.getMonth()/3)+1);return o(n,n.getMonth()+3*i)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setSeconds(o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setFullYear(o),n}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setDate(1),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(31);e.exports=function(){return r(new Date)}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r+1),o.setHours(0,0,0,0),o}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r-1),o.setHours(0,0,0,0),o}},function(e,t,n){var r=n(50);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(166);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(51);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(169);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(68);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(170);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(171);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(101);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var 
r=n(172);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){(function(t){var r,o,i,a;i=n(479),a=n(493),o=n(495),e.exports=function(){var e,n,o,i,a;return 3===arguments.length?(o=arguments[0],i=arguments[1],e=arguments[2]):2===arguments.length?(Array.isArray(arguments[0])?o=arguments[0]:i=arguments[0],"function"==typeof arguments[1]?e=arguments[1]:i=arguments[1]):1===arguments.length&&("function"==typeof arguments[0]?e=arguments[0]:Array.isArray(arguments[0])?o=arguments[0]:i=arguments[0]),null==i&&(i={}),a=new r(i),o&&t.nextTick(function(){var e,t,n;for(t=0,n=o.length;t<n;t++)e=o[t],a.write(e);return a.end()}),e&&(n=[],a.on("readable",function(){var e,t;for(t=[];e=a.read();)t.push(n.push(e));return t}),a.on("error",function(t){return e(t)}),a.on("end",function(){return e(null,n.join(""))})),a},r=function(e={}){var t,n,o,a,u,s,c,l,f,d,p,h,m,v,y,g,_,b;for(g in _={},e)b=e[g],_[g]=b;switch(_.objectMode=!0,i.Transform.call(this,_),this.options=_,null==(t=this.options).delimiter&&(t.delimiter=","),null==(n=this.options).quote&&(n.quote='"'),null==(l=this.options).quoted&&(l.quoted=!1),null==(f=this.options).quotedEmpty&&(f.quotedEmpty=void 0),null==(d=this.options).quotedString&&(d.quotedString=!1),null==(p=this.options).eof&&(p.eof=!0),null==(h=this.options).escape&&(h.escape='"'),null==(m=this.options).header&&(m.header=!1),this.options.columns=r.normalize_columns(this.options.columns),null==(v=this.options).formatters&&(v.formatters={}),this.options.formatters.bool&&(this.options.formatters.boolean=this.options.formatters.bool),null==(y=this.options.formatters).string&&(y.string=function(e){return e}),null==(o=this.options.formatters).date&&(o.date=function(e){return""+e.getTime()}),null==(a=this.options.formatters).boolean&&(a.boolean=function(e){return e?"1":""}),null==(u=this.options.formatters).number&&(u.number=function(e){return""+e}),null==(s=this.options.formatters).object&&(s.object=function(e){return JSON.stringify(e)}),null==(c=this.options).rowDelimiter&&(c.rowDelimiter="\n"),null==this.countWriten&&(this.countWriten=0),this.options.rowDelimiter){case"auto":this.options.rowDelimiter=null;break;case"unix":this.options.rowDelimiter="\n";break;case"mac":this.options.rowDelimiter="\r";break;case"windows":this.options.rowDelimiter="\r\n";break;case"ascii":this.options.rowDelimiter="";break;case"unicode":this.options.rowDelimiter="\u2028"}return this},a.inherits(r,i.Transform),e.exports.Stringifier=r,r.prototype._transform=function(e,t,n){var o,i,a;if(null!=e){if(!(a="object"!=typeof e)){0!==this.countWriten||Array.isArray(e)||null==(o=this.options).columns&&(o.columns=r.normalize_columns(Object.keys(e)));try{this.emit("record",e,this.countWriten)}catch(e){return i=e,this.emit("error",i)}if(this.options.eof){if(null==(e=this.stringify(e)))return;e+=this.options.rowDelimiter}else{if(null==(e=this.stringify(e)))return;(this.options.header||this.countWriten)&&(e=this.options.rowDelimiter+e)}}return"number"==typeof e&&(e=`${e}`),0===this.countWriten&&this.headers(),a||this.countWriten++,this.push(e),n()}},r.prototype._flush=function(e){return 0===this.countWriten&&this.headers(),e()},r.prototype.stringify=function(e){var t,n,r,i,a,u,s,c,l,f,d,p,h,m,v,y,g,_,b,w,x,E;if("object"!=typeof e)return e;if(r=this.options.columns,c=this.options.delimiter,y=this.options.quote,f=this.options.escape,Array.isArray(e))r&&e.splice(r.length);else{if(t=[],r)for(p=h=0,g=r.length;0<=g?h<g:h>g;p=0<=g?++h:--h)E=o(e,r[p].key),t[p]=void 0===E||null===E?"":E;else for(n in 
e)t.push(e[n]);e=t,t=null}if(Array.isArray(e)){for(v="",p=m=0,_=e.length;0<=_?m<_:m>_;p=0<=_?++m:--m){x=typeof(d=e[p]);try{"string"===x?d=this.options.formatters.string(d):"number"===x?d=this.options.formatters.number(d):"boolean"===x?d=this.options.formatters.boolean(d):d instanceof Date?d=this.options.formatters.date(d):"object"===x&&null!==d&&(d=this.options.formatters.object(d))}catch(e){return l=e,void this.emit("error",l)}if(d){if("string"!=typeof d)return this.emit("error",Error("Formatter must return a string, null or undefined")),null;s=d.indexOf(c)>=0,a=""!==y&&d.indexOf(y)>=0,i=d.indexOf(f)>=0&&f!==y,u=d.indexOf(this.options.rowDelimiter)>=0,(w=a||s||u||this.options.quoted||this.options.quotedString&&"string"==typeof e[p])&&i&&(b="\\"===f?new RegExp(f+f,"g"):new RegExp(f,"g"),d=d.replace(b,f+f)),a&&(b=new RegExp(y,"g"),d=d.replace(b,f+y)),w&&(d=y+d+y),v+=d}else(this.options.quotedEmpty||null==this.options.quotedEmpty&&""===e[p]&&this.options.quotedString)&&(v+=y+y);p!==e.length-1&&(v+=c)}e=v}return e},r.prototype.headers=function(){var e;if(this.options.header&&this.options.columns)return e=this.options.columns.map(function(e){return e.header}),e=this.options.eof?this.stringify(e)+this.options.rowDelimiter:this.stringify(e),this.push(e)},r.normalize_columns=function(e){var t,n,r;if(null==e)return null;if(null!=e){if("object"!=typeof e)throw Error('Invalid option "columns": expect an array or an object');e=Array.isArray(e)?function(){var n,r,o;for(o=[],n=0,r=e.length;n<r;n++)if("string"==typeof(t=e[n]))o.push({key:t,header:t});else{if("object"!=typeof t||null==t||Array.isArray(t))throw Error("Invalid column definition: expect a string or an object");if(!t.key)throw Error('Invalid column definition: property "key" is required');null==t.header&&(t.header=t.key),o.push(t)}return o}():function(){var t;for(n in t=[],e)r=e[n],t.push({key:n,header:r});return t}()}return e}}).call(t,n(40))},function(e,t,n){e.exports=o;var r=n(109).EventEmitter;function o(){r.call(this)}n(32)(o,r),o.Readable=n(110),o.Writable=n(489),o.Duplex=n(490),o.Transform=n(491),o.PassThrough=n(492),o.Stream=o,o.prototype.pipe=function(e,t){var n=this;function o(t){e.writable&&!1===e.write(t)&&n.pause&&n.pause()}function i(){n.readable&&n.resume&&n.resume()}n.on("data",o),e.on("drain",i),e._isStdio||t&&!1===t.end||(n.on("end",u),n.on("close",s));var a=!1;function u(){a||(a=!0,e.end())}function s(){a||(a=!0,"function"==typeof e.destroy&&e.destroy())}function c(e){if(l(),0===r.listenerCount(this,"error"))throw e}function l(){n.removeListener("data",o),e.removeListener("drain",i),n.removeListener("end",u),n.removeListener("close",s),n.removeListener("error",c),e.removeListener("error",c),n.removeListener("end",l),n.removeListener("close",l),e.removeListener("close",l)}return n.on("error",c),e.on("error",c),n.on("end",l),n.on("close",l),e.on("close",l),e.emit("pipe",n),e}},function(e,t,n){"use strict";t.byteLength=function(e){var t=c(e),n=t[0],r=t[1];return 3*(n+r)/4-r},t.toByteArray=function(e){for(var t,n=c(e),r=n[0],a=n[1],u=new i(function(e,t,n){return 3*(t+n)/4-n}(0,r,a)),s=0,l=a>0?r-4:r,f=0;f<l;f+=4)t=o[e.charCodeAt(f)]<<18|o[e.charCodeAt(f+1)]<<12|o[e.charCodeAt(f+2)]<<6|o[e.charCodeAt(f+3)],u[s++]=t>>16&255,u[s++]=t>>8&255,u[s++]=255&t;2===a&&(t=o[e.charCodeAt(f)]<<2|o[e.charCodeAt(f+1)]>>4,u[s++]=255&t);1===a&&(t=o[e.charCodeAt(f)]<<10|o[e.charCodeAt(f+1)]<<4|o[e.charCodeAt(f+2)]>>2,u[s++]=t>>8&255,u[s++]=255&t);return u},t.fromByteArray=function(e){for(var 
t,n=e.length,o=n%3,i=[],a=0,u=n-o;a<u;a+=16383)i.push(f(e,a,a+16383>u?u:a+16383));1===o?(t=e[n-1],i.push(r[t>>2]+r[t<<4&63]+"==")):2===o&&(t=(e[n-2]<<8)+e[n-1],i.push(r[t>>10]+r[t>>4&63]+r[t<<2&63]+"="));return i.join("")};for(var r=[],o=[],i="undefined"!=typeof Uint8Array?Uint8Array:Array,a="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",u=0,s=a.length;u<s;++u)r[u]=a[u],o[a.charCodeAt(u)]=u;function c(e){var t=e.length;if(t%4>0)throw new Error("Invalid string. Length must be a multiple of 4");var n=e.indexOf("=");return-1===n&&(n=t),[n,n===t?0:4-n%4]}function l(e){return r[e>>18&63]+r[e>>12&63]+r[e>>6&63]+r[63&e]}function f(e,t,n){for(var r,o=[],i=t;i<n;i+=3)r=(e[i]<<16&16711680)+(e[i+1]<<8&65280)+(255&e[i+2]),o.push(l(r));return o.join("")}o["-".charCodeAt(0)]=62,o["_".charCodeAt(0)]=63},function(e,t){t.read=function(e,t,n,r,o){var i,a,u=8*o-r-1,s=(1<<u)-1,c=s>>1,l=-7,f=n?o-1:0,d=n?-1:1,p=e[t+f];for(f+=d,i=p&(1<<-l)-1,p>>=-l,l+=u;l>0;i=256*i+e[t+f],f+=d,l-=8);for(a=i&(1<<-l)-1,i>>=-l,l+=r;l>0;a=256*a+e[t+f],f+=d,l-=8);if(0===i)i=1-c;else{if(i===s)return a?NaN:1/0*(p?-1:1);a+=Math.pow(2,r),i-=c}return(p?-1:1)*a*Math.pow(2,i-r)},t.write=function(e,t,n,r,o,i){var a,u,s,c=8*i-o-1,l=(1<<c)-1,f=l>>1,d=23===o?Math.pow(2,-24)-Math.pow(2,-77):0,p=r?0:i-1,h=r?1:-1,m=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(u=isNaN(t)?1:0,a=l):(a=Math.floor(Math.log(t)/Math.LN2),t*(s=Math.pow(2,-a))<1&&(a--,s*=2),(t+=a+f>=1?d/s:d*Math.pow(2,1-f))*s>=2&&(a++,s/=2),a+f>=l?(u=0,a=l):a+f>=1?(u=(t*s-1)*Math.pow(2,o),a+=f):(u=t*Math.pow(2,f-1)*Math.pow(2,o),a=0));o>=8;e[n+p]=255&u,p+=h,u/=256,o-=8);for(a=a<<o|u,c+=o;c>0;e[n+p]=255&a,p+=h,a/=256,c-=8);e[n+p-h]|=128*m}},function(e,t){},function(e,t,n){"use strict";var r=n(71).Buffer,o=n(484);function i(e,t,n){e.copy(t,n)}e.exports=function(){function e(){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.head=null,this.tail=null,this.length=0}return e.prototype.push=function(e){var t={data:e,next:null};this.length>0?this.tail.next=t:this.head=t,this.tail=t,++this.length},e.prototype.unshift=function(e){var t={data:e,next:this.head};0===this.length&&(this.tail=t),this.head=t,++this.length},e.prototype.shift=function(){if(0!==this.length){var e=this.head.data;return 1===this.length?this.head=this.tail=null:this.head=this.head.next,--this.length,e}},e.prototype.clear=function(){this.head=this.tail=null,this.length=0},e.prototype.join=function(e){if(0===this.length)return"";for(var t=this.head,n=""+t.data;t=t.next;)n+=e+t.data;return n},e.prototype.concat=function(e){if(0===this.length)return r.alloc(0);if(1===this.length)return this.head.data;for(var t=r.allocUnsafe(e>>>0),n=this.head,o=0;n;)i(n.data,t,o),o+=n.data.length,n=n.next;return t},e}(),o&&o.inspect&&o.inspect.custom&&(e.exports.prototype[o.inspect.custom]=function(){var e=o.inspect({length:this.length});return this.constructor.name+" "+e})},function(e,t){},function(e,t,n){(function(e){var r=void 0!==e&&e||"undefined"!=typeof self&&self||window,o=Function.prototype.apply;function i(e,t){this._id=e,this._clearFn=t}t.setTimeout=function(){return new i(o.call(setTimeout,r,arguments),clearTimeout)},t.setInterval=function(){return new 
i(o.call(setInterval,r,arguments),clearInterval)},t.clearTimeout=t.clearInterval=function(e){e&&e.close()},i.prototype.unref=i.prototype.ref=function(){},i.prototype.close=function(){this._clearFn.call(r,this._id)},t.enroll=function(e,t){clearTimeout(e._idleTimeoutId),e._idleTimeout=t},t.unenroll=function(e){clearTimeout(e._idleTimeoutId),e._idleTimeout=-1},t._unrefActive=t.active=function(e){clearTimeout(e._idleTimeoutId);var t=e._idleTimeout;t>=0&&(e._idleTimeoutId=setTimeout(function(){e._onTimeout&&e._onTimeout()},t))},n(486),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(t,n(3))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,o=1,i={},a=!1,u=e.document,s=Object.getPrototypeOf&&Object.getPrototypeOf(e);s=s&&s.setTimeout?s:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick(function(){l(e)})}:function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?function(){var t="setImmediate$"+Math.random()+"$",n=function(n){n.source===e&&"string"==typeof n.data&&0===n.data.indexOf(t)&&l(+n.data.slice(t.length))};e.addEventListener?e.addEventListener("message",n,!1):e.attachEvent("onmessage",n),r=function(n){e.postMessage(t+n,"*")}}():e.MessageChannel?function(){var e=new MessageChannel;e.port1.onmessage=function(e){l(e.data)},r=function(t){e.port2.postMessage(t)}}():u&&"onreadystatechange"in u.createElement("script")?function(){var e=u.documentElement;r=function(t){var n=u.createElement("script");n.onreadystatechange=function(){l(t),n.onreadystatechange=null,e.removeChild(n),n=null},e.appendChild(n)}}():r=function(e){setTimeout(l,0,e)},s.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n<t.length;n++)t[n]=arguments[n+1];var a={callback:e,args:t};return i[o]=a,r(o),o++},s.clearImmediate=c}function c(e){delete i[e]}function l(e){if(a)setTimeout(l,0,e);else{var t=i[e];if(t){a=!0;try{!function(e){var t=e.callback,r=e.args;switch(r.length){case 0:t();break;case 1:t(r[0]);break;case 2:t(r[0],r[1]);break;case 3:t(r[0],r[1],r[2]);break;default:t.apply(n,r)}}(t)}finally{c(e),a=!1}}}}}("undefined"==typeof self?void 0===e?this:e:self)}).call(t,n(3),n(40))},function(e,t,n){(function(t){function n(e){try{if(!t.localStorage)return!1}catch(e){return!1}var n=t.localStorage[e];return null!=n&&"true"===String(n).toLowerCase()}e.exports=function(e,t){if(n("noDeprecation"))return e;var r=!1;return function(){if(!r){if(n("throwDeprecation"))throw new Error(t);n("traceDeprecation")?console.trace(t):console.warn(t),r=!0}return e.apply(this,arguments)}}}).call(t,n(3))},function(e,t,n){"use strict";e.exports=i;var r=n(207),o=n(54);function i(e){if(!(this instanceof i))return new i(e);r.call(this,e)}o.inherits=n(32),o.inherits(i,r),i.prototype._transform=function(e,t,n){n(null,e)}},function(e,t,n){e.exports=n(111)},function(e,t,n){e.exports=n(33)},function(e,t,n){e.exports=n(110).Transform},function(e,t,n){e.exports=n(110).PassThrough},function(e,t,n){(function(e,r){var o=/%[sdj%]/g;t.format=function(e){if(!y(e)){for(var t=[],n=0;n<arguments.length;n++)t.push(u(arguments[n]));return t.join(" ")}n=1;for(var r=arguments,i=r.length,a=String(e).replace(o,function(e){if("%%"===e)return"%";if(n>=i)return 
e;switch(e){case"%s":return String(r[n++]);case"%d":return Number(r[n++]);case"%j":try{return JSON.stringify(r[n++])}catch(e){return"[Circular]"}default:return e}}),s=r[n];n<i;s=r[++n])m(s)||!b(s)?a+=" "+s:a+=" "+u(s);return a},t.deprecate=function(n,o){if(g(e.process))return function(){return t.deprecate(n,o).apply(this,arguments)};if(!0===r.noDeprecation)return n;var i=!1;return function(){if(!i){if(r.throwDeprecation)throw new Error(o);r.traceDeprecation?console.trace(o):console.error(o),i=!0}return n.apply(this,arguments)}};var i,a={};function u(e,n){var r={seen:[],stylize:c};return arguments.length>=3&&(r.depth=arguments[2]),arguments.length>=4&&(r.colors=arguments[3]),h(n)?r.showHidden=n:n&&t._extend(r,n),g(r.showHidden)&&(r.showHidden=!1),g(r.depth)&&(r.depth=2),g(r.colors)&&(r.colors=!1),g(r.customInspect)&&(r.customInspect=!0),r.colors&&(r.stylize=s),l(r,e,r.depth)}function s(e,t){var n=u.styles[t];return n?"["+u.colors[n][0]+"m"+e+"["+u.colors[n][1]+"m":e}function c(e,t){return e}function l(e,n,r){if(e.customInspect&&n&&E(n.inspect)&&n.inspect!==t.inspect&&(!n.constructor||n.constructor.prototype!==n)){var o=n.inspect(r,e);return y(o)||(o=l(e,o,r)),o}var i=function(e,t){if(g(t))return e.stylize("undefined","undefined");if(y(t)){var n="'"+JSON.stringify(t).replace(/^"|"$/g,"").replace(/'/g,"\\'").replace(/\\"/g,'"')+"'";return e.stylize(n,"string")}if(v(t))return e.stylize(""+t,"number");if(h(t))return e.stylize(""+t,"boolean");if(m(t))return e.stylize("null","null")}(e,n);if(i)return i;var a=Object.keys(n),u=function(e){var t={};return e.forEach(function(e,n){t[e]=!0}),t}(a);if(e.showHidden&&(a=Object.getOwnPropertyNames(n)),x(n)&&(a.indexOf("message")>=0||a.indexOf("description")>=0))return f(n);if(0===a.length){if(E(n)){var s=n.name?": "+n.name:"";return e.stylize("[Function"+s+"]","special")}if(_(n))return e.stylize(RegExp.prototype.toString.call(n),"regexp");if(w(n))return e.stylize(Date.prototype.toString.call(n),"date");if(x(n))return f(n)}var c,b="",O=!1,S=["{","}"];(p(n)&&(O=!0,S=["[","]"]),E(n))&&(b=" [Function"+(n.name?": "+n.name:"")+"]");return _(n)&&(b=" "+RegExp.prototype.toString.call(n)),w(n)&&(b=" "+Date.prototype.toUTCString.call(n)),x(n)&&(b=" "+f(n)),0!==a.length||O&&0!=n.length?r<0?_(n)?e.stylize(RegExp.prototype.toString.call(n),"regexp"):e.stylize("[Object]","special"):(e.seen.push(n),c=O?function(e,t,n,r,o){for(var i=[],a=0,u=t.length;a<u;++a)k(t,String(a))?i.push(d(e,t,n,r,String(a),!0)):i.push("");return o.forEach(function(o){o.match(/^\d+$/)||i.push(d(e,t,n,r,o,!0))}),i}(e,n,r,u,a):a.map(function(t){return d(e,n,r,u,t,O)}),e.seen.pop(),function(e,t,n){if(e.reduce(function(e,t){return 0,t.indexOf("\n")>=0&&0,e+t.replace(/\u001b\[\d\d?m/g,"").length+1},0)>60)return n[0]+(""===t?"":t+"\n ")+" "+e.join(",\n ")+" "+n[1];return n[0]+t+" "+e.join(", ")+" "+n[1]}(c,b,S)):S[0]+b+S[1]}function f(e){return"["+Error.prototype.toString.call(e)+"]"}function d(e,t,n,r,o,i){var a,u,s;if((s=Object.getOwnPropertyDescriptor(t,o)||{value:t[o]}).get?u=s.set?e.stylize("[Getter/Setter]","special"):e.stylize("[Getter]","special"):s.set&&(u=e.stylize("[Setter]","special")),k(r,o)||(a="["+o+"]"),u||(e.seen.indexOf(s.value)<0?(u=m(n)?l(e,s.value,null):l(e,s.value,n-1)).indexOf("\n")>-1&&(u=i?u.split("\n").map(function(e){return" "+e}).join("\n").substr(2):"\n"+u.split("\n").map(function(e){return" "+e}).join("\n")):u=e.stylize("[Circular]","special")),g(a)){if(i&&o.match(/^\d+$/))return 
u;(a=JSON.stringify(""+o)).match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)?(a=a.substr(1,a.length-2),a=e.stylize(a,"name")):(a=a.replace(/'/g,"\\'").replace(/\\"/g,'"').replace(/(^"|"$)/g,"'"),a=e.stylize(a,"string"))}return a+": "+u}function p(e){return Array.isArray(e)}function h(e){return"boolean"==typeof e}function m(e){return null===e}function v(e){return"number"==typeof e}function y(e){return"string"==typeof e}function g(e){return void 0===e}function _(e){return b(e)&&"[object RegExp]"===O(e)}function b(e){return"object"==typeof e&&null!==e}function w(e){return b(e)&&"[object Date]"===O(e)}function x(e){return b(e)&&("[object Error]"===O(e)||e instanceof Error)}function E(e){return"function"==typeof e}function O(e){return Object.prototype.toString.call(e)}function S(e){return e<10?"0"+e.toString(10):e.toString(10)}t.debuglog=function(e){if(g(i)&&(i=r.env.NODE_DEBUG||""),e=e.toUpperCase(),!a[e])if(new RegExp("\\b"+e+"\\b","i").test(i)){var n=r.pid;a[e]=function(){var r=t.format.apply(t,arguments);console.error("%s %d: %s",e,n,r)}}else a[e]=function(){};return a[e]},t.inspect=u,u.colors={bold:[1,22],italic:[3,23],underline:[4,24],inverse:[7,27],white:[37,39],grey:[90,39],black:[30,39],blue:[34,39],cyan:[36,39],green:[32,39],magenta:[35,39],red:[31,39],yellow:[33,39]},u.styles={special:"cyan",number:"yellow",boolean:"yellow",undefined:"grey",null:"bold",string:"green",date:"magenta",regexp:"red"},t.isArray=p,t.isBoolean=h,t.isNull=m,t.isNullOrUndefined=function(e){return null==e},t.isNumber=v,t.isString=y,t.isSymbol=function(e){return"symbol"==typeof e},t.isUndefined=g,t.isRegExp=_,t.isObject=b,t.isDate=w,t.isError=x,t.isFunction=E,t.isPrimitive=function(e){return null===e||"boolean"==typeof e||"number"==typeof e||"string"==typeof e||"symbol"==typeof e||void 0===e},t.isBuffer=n(494);var T=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];function k(e,t){return Object.prototype.hasOwnProperty.call(e,t)}t.log=function(){console.log("%s - %s",function(){var e=new Date,t=[S(e.getHours()),S(e.getMinutes()),S(e.getSeconds())].join(":");return[e.getDate(),T[e.getMonth()],t].join(" ")}(),t.format.apply(t,arguments))},t.inherits=n(32),t._extend=function(e,t){if(!t||!b(t))return e;for(var n=Object.keys(t),r=n.length;r--;)e[n[r]]=t[n[r]];return e}}).call(t,n(3),n(40))},function(e,t){e.exports=function(e){return e&&"object"==typeof e&&"function"==typeof e.copy&&"function"==typeof e.fill&&"function"==typeof e.readUInt8}},function(e,t,n){(function(t){var n="Expected a function",r="__lodash_hash_undefined__",o=1/0,i="[object Function]",a="[object GeneratorFunction]",u="[object Symbol]",s=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,c=/^\w*$/,l=/^\./,f=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,d=/\\(\\)?/g,p=/^\[object .+?Constructor\]$/,h="object"==typeof t&&t&&t.Object===Object&&t,m="object"==typeof self&&self&&self.Object===Object&&self,v=h||m||Function("return this")();var y=Array.prototype,g=Function.prototype,_=Object.prototype,b=v["__core-js_shared__"],w=function(){var e=/[^.]+$/.exec(b&&b.keys&&b.keys.IE_PROTO||"");return e?"Symbol(src)_1."+e:""}(),x=g.toString,E=_.hasOwnProperty,O=_.toString,S=RegExp("^"+x.call(E).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$"),T=v.Symbol,k=y.splice,R=F(v,"Map"),j=F(Object,"create"),P=T?T.prototype:void 0,C=P?P.toString:void 0;function M(e){var t=-1,n=e?e.length:0;for(this.clear();++t<n;){var 
r=e[t];this.set(r[0],r[1])}}function I(e){var t=-1,n=e?e.length:0;for(this.clear();++t<n;){var r=e[t];this.set(r[0],r[1])}}function A(e){var t=-1,n=e?e.length:0;for(this.clear();++t<n;){var r=e[t];this.set(r[0],r[1])}}function D(e,t){for(var n=e.length;n--;)if($(e[n][0],t))return n;return-1}function N(e,t){for(var n=0,r=(t=function(e,t){if(q(e))return!1;var n=typeof e;if("number"==n||"symbol"==n||"boolean"==n||null==e||z(e))return!0;return c.test(e)||!s.test(e)||null!=t&&e in Object(t)}(t,e)?[t]:function(e){return q(e)?e:B(e)}(t)).length;null!=e&&n<r;)e=e[W(t[n++])];return n&&n==r?e:void 0}function L(e){return!(!H(e)||function(e){return!!w&&w in e}(e))&&(function(e){var t=H(e)?O.call(e):"";return t==i||t==a}(e)||function(e){var t=!1;if(null!=e&&"function"!=typeof e.toString)try{t=!!(e+"")}catch(e){}return t}(e)?S:p).test(function(e){if(null!=e){try{return x.call(e)}catch(e){}try{return e+""}catch(e){}}return""}(e))}function U(e,t){var n=e.__data__;return function(e){var t=typeof e;return"string"==t||"number"==t||"symbol"==t||"boolean"==t?"__proto__"!==e:null===e}(t)?n["string"==typeof t?"string":"hash"]:n.map}function F(e,t){var n=function(e,t){return null==e?void 0:e[t]}(e,t);return L(n)?n:void 0}M.prototype.clear=function(){this.__data__=j?j(null):{}},M.prototype.delete=function(e){return this.has(e)&&delete this.__data__[e]},M.prototype.get=function(e){var t=this.__data__;if(j){var n=t[e];return n===r?void 0:n}return E.call(t,e)?t[e]:void 0},M.prototype.has=function(e){var t=this.__data__;return j?void 0!==t[e]:E.call(t,e)},M.prototype.set=function(e,t){return this.__data__[e]=j&&void 0===t?r:t,this},I.prototype.clear=function(){this.__data__=[]},I.prototype.delete=function(e){var t=this.__data__,n=D(t,e);return!(n<0||(n==t.length-1?t.pop():k.call(t,n,1),0))},I.prototype.get=function(e){var t=this.__data__,n=D(t,e);return n<0?void 0:t[n][1]},I.prototype.has=function(e){return D(this.__data__,e)>-1},I.prototype.set=function(e,t){var n=this.__data__,r=D(n,e);return r<0?n.push([e,t]):n[r][1]=t,this},A.prototype.clear=function(){this.__data__={hash:new M,map:new(R||I),string:new M}},A.prototype.delete=function(e){return U(this,e).delete(e)},A.prototype.get=function(e){return U(this,e).get(e)},A.prototype.has=function(e){return U(this,e).has(e)},A.prototype.set=function(e,t){return U(this,e).set(e,t),this};var B=Y(function(e){e=function(e){return null==e?"":function(e){if("string"==typeof e)return e;if(z(e))return C?C.call(e):"";var t=e+"";return"0"==t&&1/e==-o?"-0":t}(e)}(e);var t=[];return l.test(e)&&t.push(""),e.replace(f,function(e,n,r,o){t.push(r?o.replace(d,"$1"):n||e)}),t});function W(e){if("string"==typeof e||z(e))return e;var t=e+"";return"0"==t&&1/e==-o?"-0":t}function Y(e,t){if("function"!=typeof e||t&&"function"!=typeof t)throw new TypeError(n);var r=function(){var n=arguments,o=t?t.apply(this,n):n[0],i=r.cache;if(i.has(o))return i.get(o);var a=e.apply(this,n);return r.cache=i.set(o,a),a};return r.cache=new(Y.Cache||A),r}function $(e,t){return e===t||e!=e&&t!=t}Y.Cache=A;var q=Array.isArray;function H(e){var t=typeof e;return!!e&&("object"==t||"function"==t)}function z(e){return"symbol"==typeof e||function(e){return!!e&&"object"==typeof e}(e)&&O.call(e)==u}e.exports=function(e,t,n){var r=null==e?void 0:N(e,t);return void 0===r?n:r}}).call(t,n(3))},function(e,t,n){(function(t){var n=function(){try{return Function("return this")()||(0,eval)("this")}catch(e){return"object"==typeof window&&window.window===window?window:"object"==typeof self&&self.self===self?self:"object"==typeof 
t&&t.global===t?t:this}}();function r(e,t,n){var r=new XMLHttpRequest;r.open("GET",e),r.responseType="blob",r.onload=function(){a(r.response,t,n)},r.onerror=function(){console.error("could not download file")},r.send()}function o(e){var t=new XMLHttpRequest;return t.open("HEAD",e,!1),t.send(),t.status>=200&&t.status<=299}function i(e){try{e.dispatchEvent(new MouseEvent("click"))}catch(n){var t=document.createEvent("MouseEvents");t.initMouseEvent("click",!0,!0,window,0,0,0,80,20,!1,!1,!1,!1,0,null),e.dispatchEvent(t)}}var a=n.saveAs||"object"!=typeof window||window!==n?function(){}:"download"in HTMLAnchorElement.prototype?function(e,t,a){var u=n.URL||n.webkitURL,s=document.createElement("a");t=t||e.name||"download",s.download=t,s.rel="noopener","string"==typeof e?(s.href=e,s.origin!==location.origin?o(s.href)?r(e,t,a):i(s,s.target="_blank"):i(s)):(s.href=u.createObjectURL(e),setTimeout(function(){u.revokeObjectURL(s.href)},4e4),setTimeout(function(){i(s)},0))}:"msSaveOrOpenBlob"in navigator?function(e,t,n){if(t=t||e.name||"download","string"==typeof e)if(o(e))r(e,t,n);else{var i=document.createElement("a");i.href=e,i.target="_blank",setTimeout(function(){clikc(i)})}else navigator.msSaveOrOpenBlob(function(e,t){return void 0===t?t={autoBom:!1}:"object"!=typeof t&&(console.warn("Depricated: Expected third argument to be a object"),t={autoBom:!t}),t.autoBom&&/^\s*(?:text\/\S*|application\/xml|\S*\/\S*\+xml)\s*;.*charset\s*=\s*utf-8/i.test(e.type)?new Blob([String.fromCharCode(65279),e],{type:e.type}):e}(e,n),t)}:function(e,t,o,i){if((i=i||open("","_blank"))&&(i.document.title=i.document.body.innerText="downloading..."),"string"==typeof e)return r(e,t,o);var a="application/octet-stream"===e.type,u=/constructor/i.test(n.HTMLElement)||n.safari,s=/CriOS\/[\d]+/.test(navigator.userAgent);if((s||a&&u)&&"object"==typeof FileReader){var c=new FileReader;c.onloadend=function(){var e=c.result;e=s?e:e.replace(/^data:[^;]*;/,"data:attachment/file;"),i?i.location.href=e:location=e,i=null},c.readAsDataURL(e)}else{var l=n.URL||n.webkitURL,f=l.createObjectURL(e);i?i.location=f:location.href=f,i=null,setTimeout(function(){l.revokeObjectURL(f)},4e4)}};e.exports=n.saveAs=a.saveAs=a}).call(t,n(3))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(17),p=n(11);function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){var e,n,r,a;(0,i.default)(this,t);for(var s=arguments.length,c=Array(s),l=0;l<s;l++)c[l]=arguments[l];return n=r=(0,u.default)(this,(e=t.__proto__||(0,o.default)(t)).call.apply(e,[this].concat(c))),r.state={playing:!1},a=n,(0,u.default)(r,a)}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this,t=this.props,n=t.app,r=t.data,o=t.size,i=this.state.playing,a=r.metadata.sugarcube.fp.replace("/var/www/files/","https://cube.syrianarchive.org/"),u=n.mediainfo,s=u.sha256,c=u.verified,f=n.mediainfo.metadata.mediainfo.video,p=n.keyframe.metadata.keyframe.basic[0];return l.default.createElement("div",{className:"video"},i?l.default.createElement("video",{src:a,autoPlay:!0,controls:!0,muted:!0}):l.default.createElement("div",{className:"bg",style:{width:d.widths[o||"sm"],height:d.widths[o||"sm"]/f.aspect_ratio,backgroundImage:"url("+(0,d.imageUrl)(c,s,p,o)+")"},onClick:function(){return 
e.setState({playing:!0})}},l.default.createElement("div",{className:"play"})))}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"sugarcube"}})(function(e){return l.default.createElement(p.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=c(n(78)),o=c(n(499)),i=c(n(160)),a=c(n(28));t.TableObject=f,t.TableArray=d,t.TableTuples=function(e){var t=e.tag,n=e.list;return n?u.default.createElement("div",null,t&&u.default.createElement("h3",null,t),u.default.createElement("table",{className:"tableTuples "+t},u.default.createElement("tbody",null,n.map(function(e,n){var r=(0,o.default)(e),i=r[0],a=r.slice(1);return u.default.createElement("tr",{key:t+"_"+n},u.default.createElement("th",null,(0,s.formatName)(i)),a.map(function(e,t){return u.default.createElement(h,{key:n+"_"+t,value:e})}))})))):null},t.TableRow=p,t.TableCell=h;var u=c(n(1)),s=n(17);function c(e){return e&&e.__esModule?e:{default:e}}var l="__HR__";function f(e){var t=e.tag,n=e.object,r=e.order,o=e.summary;if(!n)return null;if("loading"===n)return u.default.createElement("div",{className:"tableObject loading"},t,": Loading");if(n.err)return u.default.createElement("div",{className:"tableObject error"},t," Error: ",n.err);var s=(0,a.default)(n);if(r){var c=s.reduce(function(e,t){var n=r.indexOf(t);return-1!==n?e.order.push([n,t]):e.alpha.push(t),e},{order:[],alpha:[]});s=c.order.sort(function(e,t){return e[0]-t[0]}).map(function(e){var t=(0,i.default)(e,2);t[0];return t[1]}),o||(s=s.concat(c.alpha.sort()))}else s=s.sort();return u.default.createElement("div",null,t&&u.default.createElement("h3",null,t),u.default.createElement("table",{className:"tableObject "+t},u.default.createElement("tbody",null,s.map(function(e,t){return u.default.createElement(p,{key:e+"_"+t,name:e,value:n[e]})}))))}function d(e){var t=e.tag,n=e.list;return n?u.default.createElement("div",null,t&&u.default.createElement("h3",null,t),u.default.createElement("table",{className:"tableArray "+t},u.default.createElement("tbody",null,n.map(function(e,n){return u.default.createElement("tr",{key:t+"_"+n},u.default.createElement(h,{value:e}))})))):null}function p(e){var t=e.name,n=e.value;return t===l?u.default.createElement("tr",null,u.default.createElement("th",{className:"tr"},u.default.createElement("hr",null))):u.default.createElement("tr",null,u.default.createElement("th",null,(0,s.formatName)(t)),u.default.createElement(h,{name:t,value:n}))}function h(e){var t=e.value;return t&&"object"===(void 0===t?"undefined":(0,r.default)(t))&&(t=t._raw?t.value:t.length?u.default.createElement(d,{nested:!0,tag:"",list:t}):u.default.createElement(f,{nested:!0,tag:"",object:t})),u.default.createElement("td",null,t)}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(154));t.default=function(e){return Array.isArray(e)?e:(0,r.default)(e)}},function(e,t,n){var r=n(501);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,'/* css boilerplate */\n\n* { box-sizing: border-box; }\nhtml,body {\n margin: 0; padding: 0;\n width: 100%; height: 100%;\n}\nbody {\n font-family: Helvetica, sans-serif;\n font-weight: 300;\n}\n\nh1 {\n\n}\nh2 {\n font-weight: normal;\n margin: 10px 0;\n padding: 3px;\n font-size: 24px;\n}\nh3 {\n font-weight: normal;\n margin: 10px 0 0 0;\n padding: 3px;\n font-size: 18px;\n}\nh4 {\n font-weight: 300;\n 
font-size: 12px;\n letter-spacing: 2px;\n color: #888;\n text-transform: uppercase;\n margin: 5px 10px;\n margin-top: 20px;\n}\nh4:first-child {\n margin-top: 10px;\n}\n\n.app {\n width: 100%;\n height: 100%;\n display: flex;\n flex-direction: row;\n align-items: flex-start;\n justify-content: flex-start;\n}\n\n/* header stuff */\n\nheader {\n width: 100%;\n background: #11f;\n color: white;\n align-items: stretch;\n display: flex;\n flex-wrap: wrap;\n justify-content: space-between;\n z-index: 3;\n}\nheader > section {\n justify-content: flex-start;\n align-items: center;\n display: flex;\n flex: 1 0;\n font-weight: bold;\n}\nheader > section:last-of-type {\n justify-content: flex-end;\n}\n\n/* sidebar / body columns */\n\n.sidebar {\n display: flex;\n flex-direction: column;\n justify-content: flex-start;\n align-items: flex-start;\n height: 100%;\n float: left;\n width: 200px;\n flex: 0 0 200px;\n padding: 10px;\n margin-right: 10px;\n}\n.sidebar a {\n display: block;\n padding: 10px 10px;\n text-decoration: none;\n color: #444;\n}\n.sidebar a.active {\n font-weight: bold;\n color: #222;\n}\n.body {\n display: flex;\n flex-direction: column;\n align-items: flex-start;\n justify-content: flex-start;\n flex-grow: 1;\n}\n.body > div {\n padding-bottom: 40px;\n}\n\n/* buttons / forms */\n\n.btn:focus, .btn:hover {\n background: #f1f1fc;\n color: #4b48d6 !important;\n text-decoration: none;\n}\n.btn {\n -webkit-appearance: none;\n -moz-appearance: none;\n appearance: none;\n background: #fff;\n border: .05rem solid;\n border-radius: 2px;\n margin-right: 5px;\n color: #11f;\n cursor: pointer;\n display: inline-block;\n font-size: .8rem;\n height: 1.8rem;\n line-height: 1rem;\n outline: none;\n padding: .35rem .4rem;\n text-align: center;\n text-decoration: none;\n -webkit-transition: all .2s ease;\n -o-transition: all .2s ease;\n transition: all .2s ease;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n vertical-align: middle;\n white-space: nowrap;\n}\n.btn.reset,\n.btn.panic {\n color: #b00;\n}\n.btn.btn-primary {\n background: #11f;\n border-color: #11f;\n color: white;\n}\n.btn[disabled] {\n color: #bbb !important;\n border-color: #bbb !important;\n background: white !important;\n cursor: default;\n}\n.btn.btn-primary:focus,\n.btn.btn-primary:hover {\n background: #0808ee;\n color: white !important;\n}\n.row .btn {\n margin: 0 5px 0 0;\n}\ninput[type=text] {\n border: 1px solid #888;\n padding: 4px;\n font-size: 15px;\n}\n\n\n/* tables on metadata pages */\n\ntable {\n border: 0;\n margin: 0;\n padding: 0;\n border-spacing: 0;\n}\n.tableObject td,\n.tableObject th {\n padding: 3px;\n vertical-align: top;\n}\n.tableObject hr {\n width: 100%;\n color: transparent;\n border: 0;\n border-bottom: 1px solid #bbb;\n align: left;\n margin: 3px 0;\n padding: 0;\n}\n.tableObject th,\n.tableTuples th {\n min-width: 145px;\n text-align: left;\n text-transform: capitalize;\n padding: 3px;\n padding-right: 10px;\n font-weight: 300;\n color: #333;\n}\n.tableTuples td {\n text-align: right;\n padding: 3px;\n}\n.tableObject td {\n font-weight: normal;\n color: #000;\n}\n.tableObject .tableObject {\n border: 1px solid #ddd;\n}\n.tableArray {\n border: 1px solid #ddd;\n border-spacing: 0;\n}\n.tableArray td {\n border-bottom: 1px solid #ddd;\n}\n.gray {\n font-size: 12px;\n color: #888;\n display: block;\n}\n.sha256.heading {\n margin: 20px 0 0px;\n}\n.gray span {\n padding-right: 5px;\n}\n.gray {\n margin-bottom: 10px;\n}\n.gray a {\n color: 
#666;\n}\n\n.verified {\n color: #080;\n font-weight: bold;\n}\n.unverified {\n color: #f00;\n font-weight: 300;\n}\n\n.loading, .error {\n font-weight: normal;\n margin: 10px 0;\n padding: 3px;\n font-size: 24px;\n}\n\n.title {\n text-transform: capitalize;\n}\n.rect {\n position: absolute;\n}\n.rect { border: 1px solid rgba(0,0,255); background-color: rgba(0,0,255,0.1); }\n\n/* videos / video preloader */\n\nvideo {\n max-width: 640px;\n margin: 10px 0;\n}\n.video {\n margin: 0 0 10px 0;\n}\n.video .bg {\n cursor: pointer;\n position: relative;\n background-size: cover;\n}\n.video .play {\n position: absolute;\n top: 50%;\n left: 50%;\n transform: translate3d(-50%, -50%, 0);\n width: 20%;\n height: 20%;\n background-image: url(/search/static/img/play.png);\n background-position: center center;\n background-size: contain;\n background-repeat: no-repeat;\n}\n.desktop .video .play:hover {\n -webkit-filter: invert(60%) sepia(100%) saturate(500%) hue-rotate(160deg);\n}\n\n/* spectre.css loader */\n\n.loaderWrapper {\n display: inline-block;\n position: relative;\n width: .8rem;\n height: .8rem;\n padding: 10px;\n}\n.loader {\n color: transparent !important;\n min-height: .8rem;\n pointer-events: none;\n position: relative;\n}\n\n.loader::after {\n animation: loader 500ms infinite linear;\n border: .1rem solid #5755d9;\n border-radius: 50%;\n border-right-color: transparent;\n border-top-color: transparent;\n content: "";\n display: block;\n height: .8rem;\n left: 50%;\n margin-left: -.4rem;\n margin-top: -.4rem;\n position: absolute;\n top: 50%;\n width: .8rem;\n z-index: 1;\n}\n\n.loader.loader-lg {\n min-height: 2rem;\n}\n\n.loader.loader-lg::after {\n height: 1.6rem;\n margin-left: -.8rem;\n margin-top: -.8rem;\n width: 1.6rem;\n}\n\n@keyframes loader {\n 0% {\n transform: rotate(0deg);\n }\n 100% {\n transform: rotate(360deg);\n }\n}',""])},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(15),d=n(2),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(112));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){var e=this.props.match.params.hash;this.fetch(e)}},{key:"componentDidUpdate",value:function(e){var t=this.props.match.params.hash,n=e.match.params.hash;t&&t!==n&&this.fetch(t)}},{key:"fetch",value:function(e){this.props.actions.setHash(e),this.props.actions.fetchMediaRecord(e),this.props.actions.fetchMetadata(e)}},{key:"render",value:function(){return l.default.createElement("span",{className:"gray sha256 heading"},"sha256: ",this.props.hash)}}]),t}(c.Component);t.default=(0,d.connect)(function(e){return{hash:e.metadata.hash}},function(e){return{actions:(0,f.bindActionCreators)((0,r.default)({},p),e)}})(m)},function(e,t,n){var r,o,i;!function(n,a){o=[t,e],void 0===(i="function"==typeof(r=a)?r.apply(t,o):r)||(e.exports=i)}(0,function(e,t){"use strict";var n={timeout:5e3,jsonpCallback:"callback",jsonpCallbackFunction:null};function r(e){try{delete window[e]}catch(t){window[e]=void 0}}function o(e){var t=document.getElementById(e);t&&document.getElementsByTagName("head")[0].removeChild(t)}t.exports=function(e){var t=arguments.length<=1||void 
0===arguments[1]?{}:arguments[1],i=e,a=t.timeout||n.timeout,u=t.jsonpCallback||n.jsonpCallback,s=void 0;return new Promise(function(n,c){var l=t.jsonpCallbackFunction||"jsonp_"+Date.now()+"_"+Math.ceil(1e5*Math.random()),f=u+"_"+l;window[l]=function(e){n({ok:!0,json:function(){return Promise.resolve(e)}}),s&&clearTimeout(s),o(f),r(l)},i+=-1===i.indexOf("?")?"?":"&";var d=document.createElement("script");d.setAttribute("src",""+i+u+"="+l),t.charset&&d.setAttribute("charset",t.charset),d.id=f,document.getElementsByTagName("head")[0].appendChild(d),s=setTimeout(function(){c(new Error("JSONP request to "+e+" timed out")),r(l),o(f),window[l]=function(){r(l)}},a),d.onerror=function(){c(new Error("JSONP request to "+e+" failed")),r(l),o(f),s&&clearTimeout(s)}})}})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(4)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(2),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.summary,r=t.metadata.mediainfo,o=r.audio,i=r.video,a=[];return i&&a.push(l.default.createElement(d.TableObject,{key:"video",tag:"mediaInfo: video",object:i,order:["width","height","encoded_date","tagged_date","frame_count","frame_rate","aspect_ratio","duration"],summary:n})),o&&a.push(l.default.createElement(d.TableObject,{key:"audio",tag:"mediaInfo: audio",object:o,order:["codec","encoded_date"],summary:n})),l.default.createElement("div",null,a||l.default.createElement("div",null,"No media info found"))}}]),t}(c.Component);t.default=(0,f.connect)(function(e){return{tag:"mediainfo"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:h},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=d(n(5)),o=d(n(6)),i=d(n(7)),a=d(n(8)),u=d(n(9)),s=n(1),c=d(s),l=n(2),f=n(11);function d(e){return e&&e.__esModule?e:{default:e}}var p=function(e){function t(){return(0,o.default)(this,t),(0,a.default)(this,(t.__proto__||(0,r.default)(t)).apply(this,arguments))}return(0,u.default)(t,e),(0,i.default)(t,[{key:"render",value:function(){return c.default.createElement(f.TableObject,{tag:"mediaRecord",object:this.props.mediaRecord})}}]),t}(s.Component);t.default=(0,l.connect)(function(e){return{mediaRecord:e.metadata.mediaRecord}})(p)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){return r.default.createElement("div",null,r.default.createElement(o.Sugarcube,{summary:!0}),r.default.createElement(o.MediaRecord,null),r.default.createElement(o.MediaInfo,{summary:!0}),r.default.createElement(o.Places365,{summary:!0}),r.default.createElement(o.Coco,{summary:!0}))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(208)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(17),p=n(11);function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.list,r=t.sha256,o=t.verified,i=t.metadata.keyframe,a=(n?[n]:["dense","basic","expanded"]).map(function(e){return 
l.default.createElement("div",{key:e},l.default.createElement("h3",null,e),l.default.createElement("ul",{className:"meta"},l.default.createElement("li",null,(0,d.courtesyS)(i[e].length,"frame"))),l.default.createElement("div",{className:"thumbnails"},i[e].map(function(e){return l.default.createElement(p.Keyframe,{key:e,sha256:r,verified:o,frame:e,size:"th",showFrame:!0,showTimestamp:!0})})))});return l.default.createElement("div",{className:"keyframeLists"},a)}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"keyframe"}})(function(e){return l.default.createElement(p.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=_(n(4)),o=_(n(28)),i=_(n(5)),a=_(n(6)),u=_(n(7)),s=_(n(8)),c=_(n(9)),l=n(1),f=_(l),d=n(16),p=n(2),h=n(17),m=n(11),v=g(n(21)),y=g(n(113));function g(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function _(e){return e&&e.__esModule?e:{default:e}}var b=function(e){function t(){return(0,a.default)(this,t),(0,s.default)(this,(t.__proto__||(0,i.default)(t)).apply(this,arguments))}return(0,c.default)(t,e),(0,u.default)(t,[{key:"render",value:function(){var e=this.props,t=e.app,n=e.data,r=e.match,i=parseInt(r.params.frame,10),a=n.sha256,u=n.verified,s=t.mediainfo.metadata.mediainfo.video,c=s.width,l=s.height,p=s.aspect_ratio,g=t.keyframe.metadata.keyframe,_={places365:t.places365.metadata.places365[i],coco:t.coco.metadata.coco[i]},b=(0,o.default)(t.coco.metadata.coco).map(function(e){return parseInt(e,10)}).sort(function(e,t){return e-t}),w=b.length,x=b.indexOf(i),E=(x-1+w)%w,O=(x+1)%w,S=["dense","basic","expanded"].map(function(e){return e in g&&e}).filter(function(e){return!!e}).join(", "),T=["th","sm","md","lg"].map(function(e){return f.default.createElement("span",{key:e},f.default.createElement("a",{href:(0,h.imageUrl)(u,a,i,e),target:"_blank",rel:"noopener noreferrer"},"[",e,"]")," ")});return f.default.createElement("div",{className:"keyframeSummary"},f.default.createElement("h2",null,"Frame #",i),f.default.createElement("ul",{className:"meta"},f.default.createElement("li",null,f.default.createElement(d.Link,{to:(0,h.keyframeUri)(a,b[E])},"← #",b[E])),f.default.createElement("li",null,f.default.createElement(d.Link,{to:(0,h.metadataUri)(a,"keyframe")},"Index")),f.default.createElement("li",null,f.default.createElement(d.Link,{to:(0,h.keyframeUri)(a,b[O])},"#",b[O]," →"))),f.default.createElement(m.Keyframe,{sha256:a,frame:i,verified:u,size:"md",to:(0,h.imageUrl)(u,a,i,"lg"),aspectRatio:p,detectionList:[{labels:y.coco,detections:_.coco}]}),f.default.createElement(d.Link,{to:v.publicUrl.searchByVerifiedFrame(u,a,i),className:"btn"},"Search"),f.default.createElement(m.TableTuples,{tag:"Metadata",list:[["Width",c],["Height",l],["Keyframe sets",S],["Sizes",{_raw:!0,value:T}]]}),f.default.createElement(m.DetectionList,{tag:"Places365",detections:_.places365,labels:y.places365,showEmpty:!0}),f.default.createElement(m.DetectionList,{tag:"Coco",detections:_.coco,labels:y.coco,showEmpty:!0}))}}]),t}(l.Component);t.default=(0,p.connect)(function(){return{tag:"keyframe"}})(function(e){return f.default.createElement(m.Gate,(0,r.default)({View:b},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default="person\nbicycle\ncar\nmotorbike\naeroplane\nbus\ntrain\ntruck\nboat\ntraffic light\nfire hydrant\nstop sign\nparking 
meter\nbench\nbird\ncat\ndog\nhorse\nsheep\ncow\nelephant\nbear\nzebra\ngiraffe\nbackpack\numbrella\nhandbag\ntie\nsuitcase\nfrisbee\nskis\nsnowboard\nsports ball\nkite\nbaseball bat\nbaseball glove\nskateboard\nsurfboard\ntennis racket\nbottle\nwine glass\ncup\nfork\nknife\nspoon\nbowl\nbanana\napple\nsandwich\norange\nbroccoli\ncarrot\nhot dog\npizza\ndonut\ncake\nchair\nsofa\npottedplant\nbed\ndiningtable\ntoilet\ntvmonitor\nlaptop\nmouse\nremote\nkeyboard\ncell phone\nmicrowave\noven\ntoaster\nsink\nrefrigerator\nbook\nclock\nvase\nscissors\nteddy bear\nhair drier\ntoothbrush".split("\n")},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default="airfield\nairplane_cabin\nairport_terminal\nalcove\nalley\namphitheater\namusement_arcade\namusement_park\napartment_building/outdoor\naquarium\naqueduct\narcade\narch\narchaelogical_excavation\narchive\narena/hockey\narena/performance\narena/rodeo\narmy_base\nart_gallery\nart_school\nart_studio\nartists_loft\nassembly_line\nathletic_field/outdoor\natrium/public\nattic\nauditorium\nauto_factory\nauto_showroom\nbadlands\nbakery/shop\nbalcony/exterior\nbalcony/interior\nball_pit\nballroom\nbamboo_forest\nbank_vault\nbanquet_hall\nbar\nbarn\nbarndoor\nbaseball_field\nbasement\nbasketball_court/indoor\nbathroom\nbazaar/indoor\nbazaar/outdoor\nbeach\nbeach_house\nbeauty_salon\nbedchamber\nbedroom\nbeer_garden\nbeer_hall\nberth\nbiology_laboratory\nboardwalk\nboat_deck\nboathouse\nbookstore\nbooth/indoor\nbotanical_garden\nbow_window/indoor\nbowling_alley\nboxing_ring\nbridge\nbuilding_facade\nbullring\nburial_chamber\nbus_interior\nbus_station/indoor\nbutchers_shop\nbutte\ncabin/outdoor\ncafeteria\ncampsite\ncampus\ncanal/natural\ncanal/urban\ncandy_store\ncanyon\ncar_interior\ncarrousel\ncastle\ncatacomb\ncemetery\nchalet\nchemistry_lab\nchilds_room\nchurch/indoor\nchurch/outdoor\nclassroom\nclean_room\ncliff\ncloset\nclothing_store\ncoast\ncockpit\ncoffee_shop\ncomputer_room\nconference_center\nconference_room\nconstruction_site\ncorn_field\ncorral\ncorridor\ncottage\ncourthouse\ncourtyard\ncreek\ncrevasse\ncrosswalk\ndam\ndelicatessen\ndepartment_store\ndesert/sand\ndesert/vegetation\ndesert_road\ndiner/outdoor\ndining_hall\ndining_room\ndiscotheque\ndoorway/outdoor\ndorm_room\ndowntown\ndressing_room\ndriveway\ndrugstore\nelevator/door\nelevator_lobby\nelevator_shaft\nembassy\nengine_room\nentrance_hall\nescalator/indoor\nexcavation\nfabric_store\nfarm\nfastfood_restaurant\nfield/cultivated\nfield/wild\nfield_road\nfire_escape\nfire_station\nfishpond\nflea_market/indoor\nflorist_shop/indoor\nfood_court\nfootball_field\nforest/broadleaf\nforest_path\nforest_road\nformal_garden\nfountain\ngalley\ngarage/indoor\ngarage/outdoor\ngas_station\ngazebo/exterior\ngeneral_store/indoor\ngeneral_store/outdoor\ngift_shop\nglacier\ngolf_course\ngreenhouse/indoor\ngreenhouse/outdoor\ngrotto\ngymnasium/indoor\nhangar/indoor\nhangar/outdoor\nharbor\nhardware_store\nhayfield\nheliport\nhighway\nhome_office\nhome_theater\nhospital\nhospital_room\nhot_spring\nhotel/outdoor\nhotel_room\nhouse\nhunting_lodge/outdoor\nice_cream_parlor\nice_floe\nice_shelf\nice_skating_rink/indoor\nice_skating_rink/outdoor\niceberg\nigloo\nindustrial_area\ninn/outdoor\nislet\njacuzzi/indoor\njail_cell\njapanese_garden\njewelry_shop\njunkyard\nkasbah\nkennel/outdoor\nkindergarden_classroom\nkitchen\nlagoon\nlake/natural\nlandfill\nlanding_deck\nlaundromat\nlawn\nlecture_room\nlegislative_chamber\nlibrary/indoor\nlibrary/outdoor\nlighthouse\nliving_room\nlo
ading_dock\nlobby\nlock_chamber\nlocker_room\nmansion\nmanufactured_home\nmarket/indoor\nmarket/outdoor\nmarsh\nmartial_arts_gym\nmausoleum\nmedina\nmezzanine\nmoat/water\nmosque/outdoor\nmotel\nmountain\nmountain_path\nmountain_snowy\nmovie_theater/indoor\nmuseum/indoor\nmuseum/outdoor\nmusic_studio\nnatural_history_museum\nnursery\nnursing_home\noast_house\nocean\noffice\noffice_building\noffice_cubicles\noilrig\noperating_room\norchard\norchestra_pit\npagoda\npalace\npantry\npark\nparking_garage/indoor\nparking_garage/outdoor\nparking_lot\npasture\npatio\npavilion\npet_shop\npharmacy\nphone_booth\nphysics_laboratory\npicnic_area\npier\npizzeria\nplayground\nplayroom\nplaza\npond\nporch\npromenade\npub/indoor\nracecourse\nraceway\nraft\nrailroad_track\nrainforest\nreception\nrecreation_room\nrepair_shop\nresidential_neighborhood\nrestaurant\nrestaurant_kitchen\nrestaurant_patio\nrice_paddy\nriver\nrock_arch\nroof_garden\nrope_bridge\nruin\nrunway\nsandbox\nsauna\nschoolhouse\nscience_museum\nserver_room\nshed\nshoe_shop\nshopfront\nshopping_mall/indoor\nshower\nski_resort\nski_slope\nsky\nskyscraper\nslum\nsnowfield\nsoccer_field\nstable\nstadium/baseball\nstadium/football\nstadium/soccer\nstage/indoor\nstage/outdoor\nstaircase\nstorage_room\nstreet\nsubway_station/platform\nsupermarket\nsushi_bar\nswamp\nswimming_hole\nswimming_pool/indoor\nswimming_pool/outdoor\nsynagogue/outdoor\ntelevision_room\ntelevision_studio\ntemple/asia\nthrone_room\nticket_booth\ntopiary_garden\ntower\ntoyshop\ntrain_interior\ntrain_station/platform\ntree_farm\ntree_house\ntrench\ntundra\nunderwater/ocean_deep\nutility_room\nvalley\nvegetable_garden\nveterinarians_office\nviaduct\nvillage\nvineyard\nvolcano\nvolleyball_court/outdoor\nwaiting_room\nwater_park\nwater_tower\nwaterfall\nwatering_hole\nwave\nwet_bar\nwheat_field\nwind_farm\nwindmill\nyard\nyouth_hostel\nzen_garden".split("\n")},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(4)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(2),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props.data.metadata;return l.default.createElement(d.TableObject,{tag:"Keyframe Status",object:e})}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"keyframe_status"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:h},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(11),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(113));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.app,n=e.data,r=e.summary,o=e.showAll,i=n.metadata,a=n.sha256,u=n.verified,s=t.mediainfo.metadata.mediainfo.video.aspect_ratio;return 
console.log(this.props.data),l.default.createElement(d.Classifier,{tag:"Coco",sha256:a,verified:u,keyframes:i.coco,labels:p.coco,summary:r,aspectRatio:s,showAll:o})}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"coco"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(11),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(113));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.summary,r=t.metadata,o=t.sha256,i=t.verified;return console.log(this.props.data),l.default.createElement(d.Classifier,{tag:"Places365",sha256:o,verified:i,keyframes:r.places365,labels:p.places365,summary:n})}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"places365"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(4)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(2),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){var e,n,r,a;(0,i.default)(this,t);for(var s=arguments.length,c=Array(s),l=0;l<s;l++)c[l]=arguments[l];return n=r=(0,u.default)(this,(e=t.__proto__||(0,o.default)(t)).call.apply(e,[this].concat(c))),r.state={playing:!1},a=n,(0,u.default)(r,a)}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.summary,r=(this.state.playing,t.metadata.sugarcube);r.fp.replace("/var/www/files/","https://cube.syrianarchive.org/");return l.default.createElement("div",{className:"sugarcube"},l.default.createElement(d.Video,null),!n&&l.default.createElement(d.TableObject,{tag:"Sugarcube",object:r}))}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"sugarcube"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:h},e))})},function(e,t,n){var r=n(516);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".thumbnails {\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n padding: 3px;\n}\n.keyframe {\n padding: 3px 3px 3px 3px;\n margin: 0 10px 20px 0;\n box-shadow: 0 0 0 rgba(0,0,0,0);\n transition: all 0.4ms;\n}\n.keyframe.isSaved {\n background-color: #ecebb5;\n box-shadow: 0 2px 2px rgba(0,0,0,0.2);\n}\n.keyframe img {\n background-color: #eee;\n}\n.thumbnails a {\n text-decoration: none;\n}\n.desktop .thumbnails .keyframe:hover {\n background: #eee;\n}\n.keyframe a {\n position: relative;\n display: block;\n}\n.detectionList label,\n.keyframe label {\n display: flex;\n flex-direction: row;\n justify-content: space-between;\n padding-bottom: 3px;\n}\n.keyframe label {\n color: #888;\n font-size: 14px;\n}\n.keyframe.th, .keyframe.th img { width: 160px; }\n.keyframe.sm, .keyframe.sm img { width: 320px; }\n.keyframe.md, .keyframe.md img { width: 640px; }\n.keyframe.lg, .keyframe.lg img { width: 1280px; }\n.keyframe.th img {\n min-height: 90px;\n}\n.keyframe .sha256 
{\n display: inline-block;\n min-width: auto;\n max-width: 60px;\n margin-right: 5px;\n overflow: hidden;\n}\n.keyframe label small {\n display: flex;\n align-items: flex-start;\n}\n.keyframes {\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n}\n\n.keyframeSummary .tableTuples td {\n text-align: left;\n}\n.keyframeSummary .detectionList {\n width: 326px;\n display: block;\n}\n.keyframeSummary .detectionList small {\n padding: 3px;\n font-size: inherit;\n}\n\nul.meta {\n list-style-type: none;\n margin: 3px 0 10px 0;\n padding: 0;\n padding-left: 3px;\n font-size: 12px;\n}\nul.meta li {\n list-style-type: none;\n display: inline-block;\n margin: 0; padding: 0;\n}\nul.meta li:first-child:before {\n content: '';\n padding: 0;\n}\nul.meta li:before {\n content: '\\B7';\n padding: 0 5px;\n}\n\n.sugarcube {\n margin-top: 10px;\n}",""])},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Browse=t.Results=t.Query=t.Meta=t.Container=t.Menu=void 0;var r=c(n(518)),o=c(n(520)),i=c(n(114)),a=c(n(210)),u=c(n(211)),s=c(n(525));function c(e){return e&&e.__esModule?e:{default:e}}n(526),t.Menu=r.default,t.Container=o.default,t.Meta=i.default,t.Query=a.default,t.Results=u.default,t.Browse=s.default},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=v(n(4)),o=v(n(5)),i=v(n(6)),a=v(n(7)),u=v(n(8)),s=v(n(9)),c=n(1),l=v(c),f=n(16),d=n(15),p=n(2),h=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21)),m=v(n(519));function v(e){return e&&e.__esModule?e:{default:e}}var y=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"upload",value:function(e){var t=e.dataTransfer?e.dataTransfer.files:e.target.files,n=void 0,r=void 0;for(n=0;n<t.length&&(!(r=t[n])||!r.type.match("image.*"));n++);r&&this.props.actions.upload(r)}},{key:"random",value:function(){this.props.actions.random()}},{key:"render",value:function(){var e=this,t=this.props,n=t.savedCount,r=t.options;return l.default.createElement("div",{className:"searchForm row"},l.default.createElement("div",{className:"row"},l.default.createElement("div",{className:"upload"},l.default.createElement("button",{className:"btn"},l.default.createElement("span",null,"⤴")," Search by Upload"),l.default.createElement("input",{type:"file",name:"img",accept:"image/*",onChange:this.upload.bind(this),required:!0})),l.default.createElement("button",{className:"btn random",onClick:this.random.bind(this)},l.default.createElement("span",null,"♘")," Random"),l.default.createElement(m.default,null),l.default.createElement(f.Link,{to:h.publicUrl.review()},l.default.createElement("button",{className:"btn btn-primary"},l.default.createElement("span",null,"⇪")," "+n+" Saved Image"+(1===n?"":"s")))),l.default.createElement("div",{className:"row searchOptions"},l.default.createElement("select",{className:"form-select",onChange:function(t){return e.props.actions.updateOptions({thumbnailSize:t.target.value})},value:r.thumbnailSize},l.default.createElement("option",{value:"th"},"Thumbnail"),l.default.createElement("option",{value:"sm"},"Small"),l.default.createElement("option",{value:"md"},"Medium"),l.default.createElement("option",{value:"lg"},"Large")),l.default.createElement("label",{className:"row"},l.default.createElement("input",{type:"checkbox",checked:r.groupByHash,onChange:function(t){return 
e.props.actions.updateOptions({groupByHash:t.target.checked})}})," Group by hash"),l.default.createElement("label",{className:"row"},l.default.createElement("input",{type:"number",value:r.perPage,className:"perPage",min:1,max:100,onChange:function(t){return e.props.actions.updateOptions({perPage:t.target.value})},onBlur:function(){return window.location.reload()}})," per page")))}}]),t}(c.Component);t.default=(0,p.connect)(function(e){return{options:e.search.options,savedCount:e.review.count}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},h),e)}})(y)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(5)),o=h(n(6)),i=h(n(7)),a=h(n(8)),u=h(n(9)),s=n(1),c=h(s),l=n(16),f=n(15),d=n(2),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){(0,o.default)(this,t);var e=(0,a.default)(this,(t.__proto__||(0,r.default)(t)).call(this));return e.keydown=e.keydown.bind(e),e}return(0,u.default)(t,e),(0,i.default)(t,[{key:"componentDidMount",value:function(){document.addEventListener("keydown",this.keydown)}},{key:"componentWillUnmount",value:function(){document.removeEventListener("keydown",this.keydown)}},{key:"keydown",value:function(e){27===e.keyCode&&this.panic()}},{key:"panic",value:function(){this.props.actions.panic(),this.props.history.push("/search/")}},{key:"render",value:function(){var e=this;return c.default.createElement("button",{className:"btn panic",onClick:function(){return e.panic()}},c.default.createElement("span",null,"⚠")," Panic")}}]),t}(s.Component);t.default=(0,l.withRouter)((0,d.connect)(function(e){return{}},function(e){return{actions:(0,f.bindActionCreators)({panic:p.panic},e)}})(m))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=x(n(4)),o=x(n(93)),i=x(n(5)),a=x(n(6)),u=x(n(7)),s=x(n(8)),c=x(n(9)),l=n(1),f=x(l),d=n(16),p=n(15),h=n(2),m=w(n(209)),v=w(n(21)),y=w(n(112)),g=x(n(210)),_=x(n(211)),b=x(n(524));function w(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function x(e){return e&&e.__esModule?e:{default:e}}var E=function(e){function t(){return(0,a.default)(this,t),(0,s.default)(this,(t.__proto__||(0,i.default)(t)).apply(this,arguments))}return(0,c.default)(t,e),(0,u.default)(t,[{key:"componentDidMount",value:function(){var e=m.parse(this.props.location.search.substr(1));e&&e.url?this.props.searchActions.search(e.url):this.searchByHash()}},{key:"componentDidUpdate",value:function(e){e.match.params!==this.props.match.params&&(0,o.default)(this.props.match.params)!==(0,o.default)(e.match.params)&&this.searchByHash()}},{key:"searchByHash",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0,t=this.props.match.params,n=t.verified,r=t.hash,o=t.frame;n&&r&&o?this.props.searchActions.searchByVerifiedFrame(n,r,o,e):r&&o&&this.props.searchActions.searchByFrame(r,o,e),r&&!e&&this.props.metadataActions.fetchMetadata(r)}},{key:"searchByOffset",value:function(){var e=this.props.query.results.length,t=m.parse(this.props.location.search.substr(1));t&&t.url?this.props.searchActions.search(t.url,e):this.searchByHash(e)}},{key:"render",value:function(){var e=this,t=this.props.query,n=t.query,r=t.results,o=t.loadingMore,i=this.props.options,a=!0;n&&!n.reset&&!n.loading&&r&&r.length||(a=!1);var 
u=r&&r.length>Math.min(i.perPage,30),s=r&&r.length>i.perPage;return f.default.createElement("div",{className:"searchContainer"},f.default.createElement(g.default,null),f.default.createElement(_.default,null),a?o?f.default.createElement("div",{className:"loadingMore"},"Loading more results..."):f.default.createElement("button",{onClick:function(){return e.searchByOffset()},className:u?"btn loadMore wide":"btn loadMore"},"Load more"):f.default.createElement("div",null),!s&&f.default.createElement(b.default,null))}}]),t}(l.Component);t.default=(0,d.withRouter)((0,h.connect)(function(e){return{query:e.search.query,options:e.search.options,metadata:e.metadata}},function(e){return{searchActions:(0,p.bindActionCreators)((0,r.default)({},v),e),metadataActions:(0,p.bindActionCreators)((0,r.default)({},y),e)}})(E))},function(e,t,n){"use strict";function r(e,t){return Object.prototype.hasOwnProperty.call(e,t)}e.exports=function(e,t,n,i){t=t||"&",n=n||"=";var a={};if("string"!=typeof e||0===e.length)return a;var u=/\+/g;e=e.split(t);var s=1e3;i&&"number"==typeof i.maxKeys&&(s=i.maxKeys);var c=e.length;s>0&&c>s&&(c=s);for(var l=0;l<c;++l){var f,d,p,h,m=e[l].replace(u,"%20"),v=m.indexOf(n);v>=0?(f=m.substr(0,v),d=m.substr(v+1)):(f=m,d=""),p=decodeURIComponent(f),h=decodeURIComponent(d),r(a,p)?o(a[p])?a[p].push(h):a[p]=[a[p],h]:a[p]=h}return a};var o=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},function(e,t,n){"use strict";var r=function(e){switch(typeof e){case"string":return e;case"boolean":return e?"true":"false";case"number":return isFinite(e)?e:"";default:return""}};e.exports=function(e,t,n,u){return t=t||"&",n=n||"=",null===e&&(e=void 0),"object"==typeof e?i(a(e),function(a){var u=encodeURIComponent(r(a))+n;return o(e[a])?i(e[a],function(e){return u+encodeURIComponent(r(e))}).join(t):u+encodeURIComponent(r(e[a]))}).join(t):u?encodeURIComponent(r(u))+n+encodeURIComponent(r(e)):""};var o=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)};function i(e,t){if(e.map)return e.map(t);for(var n=[],r=0;r<e.length;r++)n.push(t(e[r],r));return n}var a=Object.keys||function(e){var t=[];for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&t.push(n);return t}},function(e,t){var n=window.Blob,r=100==new n([new Uint8Array(100)]).size;e.exports=function(e){for(var t=e.split(",")[1],o=atob(t),i=new ArrayBuffer(o.length),a=new Uint8Array(i),u=0;u<o.length;u++)a[u]=o.charCodeAt(u);r||(a=i);var s=new n([a],{type:function(e){return e.split(";")[0].slice(5)}(e)});return s.slice=s.slice||s.webkitSlice,s}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){return r.default.createElement("div",{className:"safety"},r.default.createElement("div",null,r.default.createElement("h4",null,"Safety Tips"),r.default.createElement("ul",null,r.default.createElement("li",null," Look away if you see something traumatic "),r.default.createElement("li",null," Hit ",r.default.createElement("tt",null,"ESC")," to activate panic mode (hides all images) "),r.default.createElement("li",null," Use thumbnails to reduce details "),r.default.createElement("li",null," Take breaks and refresh yourself with positive imagery "))))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=_(n(4)),o=_(n(5)),i=_(n(6)),a=_(n(7)),u=_(n(8)),s=_(n(9)),c=n(1),l=_(c),f=n(16),d=n(15),p=n(2),h=n(11),m=g(n(21)),v=g(n(112)),y=_(n(114));function 
g(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function _(e){return e&&e.__esModule?e:{default:e}}var b=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){this.browse()}},{key:"componentDidUpdate",value:function(e){e.match.params!==this.props.match.params&&this.browse()}},{key:"browse",value:function(){var e=this.props.match.params.hash;e&&this.props.searchActions.browse(e),e&&this.props.metadataActions.fetchMetadata(e)}},{key:"render",value:function(){var e=this.props,t=e.browse;e.options;return console.log("browse",t),!t||t.reset||t.loading?l.default.createElement("div",{className:"browseComponent column"},l.default.createElement("h3",null,"Loading keyframes..."),l.default.createElement(h.Loader,null)):l.default.createElement("div",{className:"browseComponent column"},l.default.createElement("h3",null,"Video Preview"),l.default.createElement(h.Video,{size:"md"}),l.default.createElement(y.default,{query:t,sugarcube:!0}),l.default.createElement("div",{className:"row buttons"},l.default.createElement(f.Link,{to:"/metadata/"+t.hash,className:"btn"},"View Full Metadata")),l.default.createElement("h3",null,"Keyframes"),l.default.createElement(h.Keyframes,{frames:t.frames,showHash:!0,showTimestamp:!0,showSearchButton:!0,showSaveButton:!0}))}}]),t}(c.Component);t.default=(0,f.withRouter)((0,p.connect)(function(e){return{browse:e.search.browse,options:e.search.options,metadata:e.metadata}},function(e){return{searchActions:(0,d.bindActionCreators)((0,r.default)({},m),e),metadataActions:(0,d.bindActionCreators)((0,r.default)({},v),e)}})(b))},function(e,t,n){var r=n(527);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".btn span {\n font-size: large;\n}\n.row {\n display: flex;\n flex-direction: row;\n}\n.column {\n display: flex;\n flex-direction: column;\n}\n\n.searchContainer h3 {\n padding: 0;\n margin-top: 0;\n margin-bottom: 5px;\n margin-left: 3px;\n}\n.searchContainer h4 {\n margin-left: 0;\n width: 100%;\n}\n.searchContainer .subtitle {\n display: block;\n margin-left: 3px;\n margin-bottom: 10px;\n}\n.searchForm {\n display: flex;\n justify-content: space-between;\n align-items: center;\n width: 100%;\n padding: 20px;\n background: #eee;\n}\n.searchForm .row {\n align-items: center;\n}\n\n.searchMeta {\n display: flex;\n flex-direction: column;\n font-size: 14px;\n line-height: 18px;\n padding: 0;\n}\n.searchMeta span {\n white-space: nowrap;\n overflow: hidden;\n text-overflow: ellipsis;\n max-width: calc(100vw - 23px - 640px - 30px);\n}\n\n.keyframe .thumbnail {\n position: relative;\n cursor: pointer;\n}\n.keyframe .searchButtons {\n position: absolute;\n bottom: 0; left: 0;\n padding: 0 5px 15px 5px;\n width: 100%;\n text-align: center;\n opacity: 0;\n transition: all 0.2s;\n}\n.desktop .keyframe .thumbnail:hover .searchButtons,\n.mobile .keyframe .searchButtons {\n opacity: 1;\n}\n.keyframe .searchButtons .btn {\n margin-right: 0;\n height: auto;\n padding: 0.15rem 0.3rem;\n}\n.keyframe a {\n text-decoration: none;\n}\n\n.body > div.searchForm {\n padding-bottom: 20px;\n}\n.upload {\n position: relative;\n cursor: pointer;\n}\n.upload .btn {\n pointer-events: none;\n cursor: pointer;\n}\n.upload input {\n 
position: absolute;\n top: 0; left: 0;\n width: 100%; height: 100%;\n opacity: 0;\n cursor: pointer;\n}\n\n.reviewSaved,\n.browseComponent,\n.searchQuery {\n margin: 0px 10px;\n padding: 13px;\n}\n.searchQuery img {\n cursor: crosshair;\n user-select: none;\n max-width: 640px;\n max-height: 480px;\n}\n.searchContainer .searchQuery h3 {\n margin-left: 0;\n margin-bottom: 10px;\n}\n\n.searchBox {\n min-width: 640px;\n margin: 0 10px 0 0;\n background-color: #eee;\n position: relative;\n}\n.searchBox img {\n display: block;\n}\n.searchBox .box {\n position: absolute;\n cursor: crosshair;\n border: 1px solid #11f;\n background-color: rgba(16,16,255,0.1);\n}\n\n.searchResults {\n margin: 0 20px 20px 20px;\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n}\n.searchResultsHeading {\n width: 100%;\n}\n.searchOptions .row {\n font-size: 12px;\n margin-left: 10px;\n}\n.searchOptions input {\n font-size: 12px;\n margin-right: 5px;\n font-family: Helvetica, sans-serif;\n}\n.searchOptions input[type=text],\n.searchOptions input[type=number] {\n width: 30px;\n text-align: right;\n}\n.keyframeGroup {\n max-width: 650px;\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n align-items: flex-start;\n align-content: flex-start;\n justify-content: flex-start;\n}\n.keyframeGroup h4 a {\n color: #888;\n text-decoration: none\n}\n.keyframeGroup h4 a:hover {\n text-decoration: underline\n}\n\n/* load more button that gets bigger */\n\n.loadMore {\n width: 400px;\n margin: 20px;\n height: 40px;\n transition: all;\n}\n.loadMore.wide {\n width: calc(100% - 40px);\n margin: 20px;\n height: 100px;\n}\n.loadingMore {\n margin: 20px 20px 200px 20px;\n}\n\n/* health and safety warning */\n\n.safety div {\n display: inline-block;\n margin: 20px 20px;\n padding: 10px;\n background: #fff8e8;\n color: #111;\n box-shadow: 0 1px 2px rgba(0,0,0,0.2);\n font-size: 13px;\n line-height: 1.4;\n}\n.safety ul {\n margin: 0;\n padding: 0 21px;\n}\n.safety li {\n padding: 1px 0 0 0;\n}\n.safety h4 {\n margin-top: 5px;\n}\n\n/* browser section */\n\n.browseComponent h3 {\n margin-bottom: 10px;\n}\n.browseComponent .buttons {\n margin-top: 10px;\n}\n\n/* disable twiddle button on input[type=number] */\n\ninput::-webkit-outer-spin-button,\ninput::-webkit-inner-spin-button {\n -webkit-appearance: none;\n margin: 0;\n}\ninput[type='number'] {\n -moz-appearance:textfield;\n}\n",""])},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Saved=void 0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(529));n(531),t.Saved=r.default},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=y(n(4)),o=y(n(28)),i=y(n(5)),a=y(n(6)),u=y(n(7)),s=y(n(8)),c=y(n(9)),l=n(1),f=y(l),d=n(15),p=n(2),h=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(97)),m=n(11),v=y(n(530));function y(e){return e&&e.__esModule?e:{default:e}}var g=function(e){function t(){var e,n,r,o;(0,a.default)(this,t);for(var u=arguments.length,c=Array(u),l=0;l<u;l++)c[l]=arguments[l];return n=r=(0,s.default)(this,(e=t.__proto__||(0,i.default)(t)).call.apply(e,[this].concat(c))),r.state={showAnnotator:!1},o=n,(0,s.default)(r,o)}return(0,c.default)(t,e),(0,u.default)(t,[{key:"render",value:function(){var e=this,t=this.props.saved,n=this.state.showAnnotator,r=(0,o.default)(t).sort().map(function(e){var 
n=t[e],r=n.verified,i=n.hash,a=n.frames;return(0,o.default)(a).sort().map(function(e){return{verified:r,hash:i,frame:e}})}).reduce(function(e,t){return t&&t.length?e.concat(t):e},[]),i=0===r.length;return f.default.createElement("div",{className:"reviewSaved"},f.default.createElement("h2",null,"Saved Images"),f.default.createElement("div",{className:"reviewButtons"},f.default.createElement("button",{className:"btn",disabled:i,onClick:function(){return e.setState({showAnnotator:!n})}},"Import into VCAT"),f.default.createElement("button",{className:"btn",disabled:i,onClick:function(){return e.props.actions.exportCSV()}},"Export CSV"),f.default.createElement("button",{className:"btn",disabled:i,onClick:function(){return e.props.actions.refresh()}},"Refresh"),f.default.createElement("button",{className:"btn reset",disabled:i,onClick:function(){return confirm("This will clear your saved images.")&&e.props.actions.clear()}},"Reset")),n&&f.default.createElement(v.default,null),f.default.createElement(m.Keyframes,{frames:r,showHash:!0,showTimestamp:!0,showSearchButton:!0,showSaveButton:!0}))}}]),t}(l.Component);t.default=(0,p.connect)(function(e){return{saved:e.review.saved}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},h),e)}})(g)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=v(n(4)),o=v(n(65)),i=v(n(5)),a=v(n(6)),u=v(n(7)),s=v(n(8)),c=v(n(9)),l=n(1),f=v(l),d=n(15),p=n(2),h=n(11),m=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(97));function v(e){return e&&e.__esModule?e:{default:e}}var y=function(e){function t(){(0,a.default)(this,t);var e=(0,s.default)(this,(t.__proto__||(0,i.default)(t)).call(this));return e.state={title:"",graphic:!1},e.handleInput=e.handleInput.bind(e),e}return(0,c.default)(t,e),(0,u.default)(t,[{key:"handleInput",value:function(e){var t=e.target,n=t.name,r=t.value;"title"===n&&(r=r.replace(/[^-_a-zA-Z0-9 ]/g,"")),"graphic"===n&&(r=e.target.checked),this.setState((0,o.default)({},n,r))}},{key:"render",value:function(){var e=this,t=this.props.review;return f.default.createElement("div",{className:"importMenu"},f.default.createElement("div",null,f.default.createElement("h3",null,"New VCAT Image Group"),f.default.createElement("label",null,f.default.createElement("input",{type:"text",name:"title",placeholder:"Title this group",autoComplete:"off",onChange:this.handleInput,value:this.state.title})),f.default.createElement("label",null,f.default.createElement("input",{type:"checkbox",name:"graphic",checked:this.state.graphic,onChange:this.handleInput})," ",f.default.createElement("small",null,"Graphic content")),f.default.createElement("label",null,f.default.createElement("button",{className:"btn check",onClick:this.props.actions.dedupe},t.dedupe.loading?"Deduping...":"Dedupe"),f.default.createElement("button",{className:"btn btn-primary create",onClick:function(){return e.props.actions.create(e.state)}},"Create Group"),(t.dedupe.loading||t.create.loading)&&f.default.createElement(h.Loader,null),!!t.dedupe.count&&t.dedupe.count+" images removed")))}}]),t}(l.Component);t.default=(0,p.connect)(function(e){return{review:e.review}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},m),e)}})(y)},function(e,t,n){var r=n(532);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 
0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".importMenu {\n padding: 10px;\n margin: 10px 0;\n background: #eee;\n}\n.reviewButtons {\n margin-bottom: 10px;\n}\n.importMenu h3 {\n margin-top: 0;\n margin-bottom: 10px;\n}\n.importMenu label {\n display: block;\n margin-bottom: 5px;\n}\n.importMenu input[type=text] {\n font-size: 13px;\n width: 250px;\n}",""])}]);
\ No newline at end of file diff --git a/old/faiss/static/js/store2.min.js b/old/faiss/static/js/store2.min.js deleted file mode 100644 index 75e3ca37..00000000 --- a/old/faiss/static/js/store2.min.js +++ /dev/null @@ -1,5 +0,0 @@ -/*! store2 - v2.7.0 - 2018-03-04 -* Copyright (c) 2018 Nathan Bubna; Licensed (MIT OR GPL-3.0) */ - -!function(a,b){var c={version:"2.7.0",areas:{},apis:{},inherit:function(a,b){for(var c in a)b.hasOwnProperty(c)||(b[c]=a[c]);return b},stringify:function(a){return void 0===a||"function"==typeof a?a+"":JSON.stringify(a)},parse:function(a){try{return JSON.parse(a)}catch(b){return a}},fn:function(a,b){c.storeAPI[a]=b;for(var d in c.apis)c.apis[d][a]=b},get:function(a,b){return a.getItem(b)},set:function(a,b,c){a.setItem(b,c)},remove:function(a,b){a.removeItem(b)},key:function(a,b){return a.key(b)},length:function(a){return a.length},clear:function(a){a.clear()},Store:function(a,b,d){var e=c.inherit(c.storeAPI,function(a,b,c){return 0===arguments.length?e.getAll():"function"==typeof b?e.transact(a,b,c):void 0!==b?e.set(a,b,c):"string"==typeof a||"number"==typeof a?e.get(a):a?e.setAll(a,b):e.clear()});e._id=a;try{b.setItem("_-bad-_","wolf"),e._area=b,b.removeItem("_-bad-_")}catch(a){}return e._area||(e._area=c.inherit(c.storageAPI,{items:{},name:"fake"})),e._ns=d||"",c.areas[a]||(c.areas[a]=e._area),c.apis[e._ns+e._id]||(c.apis[e._ns+e._id]=e),e},storeAPI:{area:function(a,b){var d=this[a];return d&&d.area||(d=c.Store(a,b,this._ns),this[a]||(this[a]=d)),d},namespace:function(a,b){if(!a)return this._ns?this._ns.substring(0,this._ns.length-1):"";var d=a,e=this[d];return e&&e.namespace||(e=c.Store(this._id,this._area,this._ns+d+"."),this[d]||(this[d]=e),b||e.area("session",c.areas.session)),e},isFake:function(){return"fake"===this._area.name},toString:function(){return"store"+(this._ns?"."+this.namespace():"")+"["+this._id+"]"},has:function(a){return this._area.has?this._area.has(this._in(a)):!!(this._in(a)in this._area)},size:function(){return this.keys().length},each:function(a,b){for(var d=0,e=c.length(this._area);d<e;d++){var f=this._out(c.key(this._area,d));if(void 0!==f&&a.call(this,f,b||this.get(f))===!1)break;e>c.length(this._area)&&(e--,d--)}return b||this},keys:function(a){return this.each(function(a,b){b.push(a)},a||[])},get:function(a,b){var d=c.get(this._area,this._in(a));return null!==d?c.parse(d):b||d},getAll:function(a){return this.each(function(a,b){b[a]=this.get(a)},a||{})},transact:function(a,b,c){var d=this.get(a,c),e=b(d);return this.set(a,void 0===e?d:e),this},set:function(a,b,d){var e=this.get(a);return null!=e&&d===!1?b:c.set(this._area,this._in(a),c.stringify(b),d)||e},setAll:function(a,b){var c,d;for(var e in a)d=a[e],this.set(e,d,b)!==d&&(c=!0);return c},add:function(a,b){var d=this.get(a);if(d instanceof Array)b=d.concat(b);else if(null!==d){var e=typeof d;if(e===typeof b&&"object"===e){for(var f in b)d[f]=b[f];b=d}else b=d+b}return c.set(this._area,this._in(a),c.stringify(b)),b},remove:function(a){var b=this.get(a);return c.remove(this._area,this._in(a)),b},clear:function(){return this._ns?this.each(function(a){c.remove(this._area,this._in(a))},1):c.clear(this._area),this},clearAll:function(){var a=this._area;for(var b in c.areas)c.areas.hasOwnProperty(b)&&(this._area=c.areas[b],this.clear());return this._area=a,this},_in:function(a){return"string"!=typeof a&&(a=c.stringify(a)),this._ns?this._ns+a:a},_out:function(a){return this._ns?a&&0===a.indexOf(this._ns)?a.substring(this._ns.length):void 0:a}},storageAPI:{length:0,has:function(a){return 
this.items.hasOwnProperty(a)},key:function(a){var b=0;for(var c in this.items)if(this.has(c)&&a===b++)return c},setItem:function(a,b){this.has(a)||this.length++,this.items[a]=b},removeItem:function(a){this.has(a)&&(delete this.items[a],this.length--)},getItem:function(a){return this.has(a)?this.items[a]:null},clear:function(){for(var a in this.items)this.removeItem(a)},toString:function(){return this.length+" items in "+this.name+"Storage"}}},d=c.Store("local",function(){try{return localStorage}catch(a){}}());d.local=d,d._=c,d.area("session",function(){try{return sessionStorage}catch(a){}}()),"function"==typeof b&&void 0!==b.amd?b("store2",[],function(){return d}):"undefined"!=typeof module&&module.exports?module.exports=d:(a.store&&(c.conflict=a.store),a.store=d)}(this,this.define); -//# sourceMappingURL=store2.min.js.map
\ No newline at end of file
diff --git a/old/faiss/static/metadata.html b/old/faiss/static/metadata.html
deleted file mode 100644
index e74e1ee1..00000000
--- a/old/faiss/static/metadata.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<!DOCTYPE html>
-<html>
-  <head>
-    <meta charset="UTF-8">
-    <title>VFrame Metadata</title>
-    <link rel="shortcut icon" href="/search/static/favicon.ico" />
-    <meta name="viewport" content="width=device-width,initial-scale=1.0">
-  </head>
-  <body>
-    <script type="text/javascript" src="/search/static/js/metadata-app.js"></script></body>
-</html>
diff --git a/old/faiss/static/search.html b/old/faiss/static/search.html
deleted file mode 100644
index 056d06c1..00000000
--- a/old/faiss/static/search.html
+++ /dev/null
@@ -1 +0,0 @@
-search.html
\ No newline at end of file
diff --git a/old/faiss/util.py b/old/faiss/util.py
deleted file mode 100644
index 97afbc22..00000000
--- a/old/faiss/util.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import time
-import simplejson as json
-import pickle
-from os import path
-from collections import namedtuple
-
-# Converts JSON el['key'] to Pythonic object-style el.key
-def _json_object_hook(d):
-    return namedtuple('X', d.keys())(*d.values())
-
-# Load a JSON recipe
-def load_recipe(path):
-    with open(path) as fh:
-        return json.load(fh, object_hook=_json_object_hook)
-
-# Load a pickle file
-def load_pickle(data_dir, pkl_fn):
-    load_start = time.time()
-    with open(path.join(str(data_dir), str(pkl_fn)), 'rb') as fh:
-        raw = fh.read()
-    data = pickle.loads(raw)
-    load_end = time.time()
-    load_time = load_end - load_start
-    print("Pickle load time: {:.1f}s".format(load_time))
-    return data
-
-def read_json(fn):
-    with open(fn, 'r') as json_file:
-        return json.load(json_file)
diff --git a/old/faiss/wsgi.py b/old/faiss/wsgi.py
deleted file mode 100644
index 371862fb..00000000
--- a/old/faiss/wsgi.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from server import app
-
-if __name__ == "__main__":
-    app.run()
-
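
For reference, the removed old/faiss/util.py relied on a small trick for config handling: load_recipe() passed a namedtuple factory as the object_hook to json.load, so recipe JSON could be read as attribute-style objects (el.key instead of el['key']). A minimal sketch of that pattern, using the standard-library json module and an inline string purely for illustration (the recipe fields below are invented, not taken from this repo):

import json
from collections import namedtuple

def _json_object_hook(d):
    # every JSON object becomes an immutable record: el['key'] -> el.key
    return namedtuple('X', d.keys())(*d.values())

def loads_recipe(text):
    # same idea as the deleted load_recipe(), but parsing a string instead of a file
    return json.loads(text, object_hook=_json_object_hook)

recipe = loads_recipe('{"index": {"path": "feats.pkl", "dim": 2048}}')
print(recipe.index.path, recipe.index.dim)  # -> feats.pkl 2048

The resulting objects are read-only and every key must be a valid Python identifier, so the pattern only suits trusted config files of this kind.
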
\ No newline at end of file diff --git a/old/server/app/__init__.py b/old/server/app/__init__.py deleted file mode 100644 index bce3f9ee..00000000 --- a/old/server/app/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -import logging -from logging.handlers import RotatingFileHandler - -from flask import Flask -from flask_bootstrap import Bootstrap - -from flask import Flask - -from config import config, Config - -bootstrap = Bootstrap() -#celery = Celery(__name__, broker=Config.CELERY_BROKER_URL) -from .basemodels import celery - -def create_app(config_name): - app = Flask(__name__) - app.config.from_object(config[config_name]) - config[config_name].init_app(app) - - bootstrap.init_app(app) - celery.conf.update(app.config) - - from .main import main as main_blueprint - app.register_blueprint(main_blueprint) - - #handler = RotatingFileHandler('debug.log', maxBytes=10000, backupCount=1) - #handler.setLevel(logging.INFO) - #app.logger.addHandler(handler) - - format = "%(asctime)s - [%(levelname)s] %(message)s" - logging.basicConfig(filename='debug.log', - filemode='a', - format=format, - level=logging.DEBUG) - console = logging.StreamHandler() - console.setLevel(logging.DEBUG) - logging.getLogger(__name__).addHandler(console) - - return app diff --git a/old/server/app/basemodels.py b/old/server/app/basemodels.py deleted file mode 100644 index 475ab0c2..00000000 --- a/old/server/app/basemodels.py +++ /dev/null @@ -1,5 +0,0 @@ -from config import config, Config -from celery import Celery - -#bootstrap = Bootstrap() -celery = Celery(__name__, broker=Config.CELERY_BROKER_URL) diff --git a/old/server/app/favicon.ico b/old/server/app/favicon.ico Binary files differdeleted file mode 100644 index 4d001b21..00000000 --- a/old/server/app/favicon.ico +++ /dev/null diff --git a/old/server/app/index.html b/old/server/app/index.html deleted file mode 100644 index 3c1b0dfd..00000000 --- a/old/server/app/index.html +++ /dev/null @@ -1,161 +0,0 @@ -<!doctype html> -<html> -<head> - <meta charset="utf-8"> - <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> - <link rel="shortcut icon" href="/static/img/favicon.ico" /> - <title>DullDream (v2 x ZkM)</title> - <link rel="stylesheet" type="text/css" href="static/css/dullbrown-theme.css"> -</head> -<body> - -<header> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></a></h1> - <h2 class="subtitle">Neural network photo effect</h2> -</header> - -<div class="upload_view container"> - <div class="row"> - <div id="photo_area" class="dash_border"> - <input class="hidden_input" id="user_file" type="file" accept="image/*"> - <canvas class="photo" id="user_photo_canvas" width="512" height="512"></canvas> - <div class="center_inner"> - <label id="take_photo_btn" for="user_file" class="upload_center_btn"> - <div class='btn-lg btn'>Take Photo</div> - </label> - <div id="details"></div> - <div id="progress"></div> - </div> - - <div id="preloader_anim"> - <img src="/static/img/loader.gif"> - </div> - </div> - </div> - - <div id="upload_controls" class="row"> - <div class="align_center"> - <div id="restart_btn"> - <a id="restart_btn" class="btn btn-md btn-default" role="button">Change Image</a> - <input type='file' accept="image/*"> - </div> - <div id="dropdown_btn"> - <select id="dropdown"></select> - </div> - <div id="upload_btn"> - <a id="take_photo_btn" class="btn btn-md btn-important" role="button">Upload</a> - </div> - </div> - <div class="align_center consent_box"> - <label> - <input type="checkbox" 
id="agree" value="1" checked> - I consent to have my dulled image displayed at ZkM. - </label> - </div> - </div> - - <div id="about_btn" class="row"> - <div class="align_center"> - <a class="btn btn-sm btn-default about_button" role="button">About</a> - <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a> - <p class="notice"> - All images uploaded can be used for exhibition and review purposes. - </p> - <p class="notice"> - Currently this work is on view at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a>. View recent DullDreams <a href="/gallery">here</a>. - </p> - </div> - </div> -</div> - -<div class="about_view modal"> - <div class="inner"> - <header> - <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1> - </header> - <div class='content'> - <p> - <b><i>DullDream™ by DullTech™</i></b> is a series of experiments appropriating neural network image recognition technology to make visual representation less interesting. - </p> - <p> - Can machine learning help us desensitize? Our impactful lives are clogging up social media feeds with unique filter settings, leaving us nostalgic for a vanilla future. Can machine learning help us achieve this? Take the excitement out of our lives, prepare us for a time where we will all have to be the same, have the same values and culture? Painting a future where the Dull is no longer a dream but a nightmare? - </p> - <p> - DullDream™ was developed for Transmediale 2017 - Ever Elusive by <a href="http://constantdullaart.com">Constant Dullaart</a> in collaboration with <a href="http://ahprojects.com">Adam Harvey</a>. It has generously been made possible by the Creative Industries Fund NL. - </p> - </div> - <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center> - </div> -</div> - -<div class="privacy_view modal"> - <div class="inner"> - <header> - <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1> - </header> - <div class='content'> - <h3>Privacy Notice</h3> - <p> - Images uploaded to this site are being used for a public art display at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a> - </p> - <p> - If you would not like to be included, be sure to uncheck the permission box on the upload page. - </p> - - </div> - <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center> - </div> -</div> - - -<div class="result_view"> - <div class="final_result"> - </div> - - <div class="row made_with"> - Made with DullDream.xyz for ZKM OpenCodes 2017 - </div> - - <div class="row"> - <button class='btn' id="show_all_results">Detailed Analysis</button> - </div> - - <div class="all_results"> - </div> - - <div id="share_btns" class="row"> - <a id="permalink" href="#">Permalink</a> - </div> - - <div id="about_btn" class="row"> - <div class="align_center"> - <a href="/" class="btn btn-sm btn-default home_button" role="button">Home</a> - <a class="btn btn-sm btn-default about_button" role="button">About</a> - <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a> - </div> - - </div> - -</div> - -<div id="footer"> - DullDream™ (beta) by <a href="http://constantdullaart.com">Constant Dullaart</a>. 
- Made in collaboration with <a href="http://ahprojects.com">Adam Harvey</a> -</div> - -</body> -<script type="text/html" id="result_template"> - <div class="row"> - <img src="{img}"><br> - <b>{title}</b> - </div> -</script> -<script type="text/json" id="dropdown_options">[]</script> -<script type="text/javascript" src="static/js/vendor/jquery-3.3.1.min.js"></script> -<script type="text/javascript" src="static/js/vendor/ExifReader.js"></script> -<script type="text/javascript" src="static/js/vendor/canvas-to-blob.js"></script> -<script type="text/javascript" src="static/js/vendor/prefixfree.js"></script> -<script type="text/javascript" src="static/js/util.js"></script> -<script type="text/javascript" src="static/js/upload.js"></script> -<script type="text/javascript" src="static/js/app.js"></script> -</html> diff --git a/old/server/app/main/__init__.py b/old/server/app/main/__init__.py deleted file mode 100644 index a21e2754..00000000 --- a/old/server/app/main/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from flask import Blueprint - -main = Blueprint('main', __name__) - -from . import views, errors, tasks, utils
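
The old/server/app package deleted here followed the common Flask application-factory layout: app/__init__.py built the app and registered the main blueprint, while app/main/__init__.py created the blueprint and imported its view modules at the bottom to avoid a circular import. A condensed single-file sketch of that wiring (the /health route is a stand-in for illustration, not something the deleted code defined):

from flask import Blueprint, Flask

# app/main/__init__.py, condensed: the blueprint is created first
main = Blueprint('main', __name__)

# placeholder view; the real views lived in main/views.py and imported `main`
@main.route('/health')
def health():
    return 'ok'

# app/__init__.py, condensed: the factory builds an app per configuration
def create_app(config_object=None):
    app = Flask(__name__)
    if config_object is not None:
        app.config.from_object(config_object)
    app.register_blueprint(main)
    return app

if __name__ == '__main__':
    create_app().run(debug=True)

The deleted version additionally initialized Flask-Bootstrap, pushed the app config into Celery, and set up file plus console logging inside the same factory.
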
\ No newline at end of file
diff --git a/old/server/app/main/errors.py b/old/server/app/main/errors.py
deleted file mode 100644
index 60b5f227..00000000
--- a/old/server/app/main/errors.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from flask import render_template, request, jsonify
-from . import main
-
-
-@main.app_errorhandler(403)
-def forbidden(e):
-    if request.accept_mimetypes.accept_json and \
-            not request.accept_mimetypes.accept_html:
-        response = jsonify({'error': 'forbidden'})
-        response.status_code = 403
-        return response
-    return render_template('403.html'), 403
-
-
-@main.app_errorhandler(404)
-def page_not_found(e):
-    if request.accept_mimetypes.accept_json and \
-            not request.accept_mimetypes.accept_html:
-        response = jsonify({'error': 'not found'})
-        response.status_code = 404
-        return response
-    return render_template('404.html'), 404
-
-
-@main.app_errorhandler(500)
-def internal_server_error(e):
-    if request.accept_mimetypes.accept_json and \
-            not request.accept_mimetypes.accept_html:
-        response = jsonify({'error': 'internal server error'})
-        response.status_code = 500
-        return response
-    return render_template('500.html'), 500
diff --git a/old/server/app/main/forms.py b/old/server/app/main/forms.py
deleted file mode 100644
index bc1399ad..00000000
--- a/old/server/app/main/forms.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from flask.ext.wtf import Form
-from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
-    SubmitField
-from wtforms.validators import Required, Length, Email, Regexp
-from wtforms import ValidationError
-from flask.ext.pagedown.fields import PageDownField
-from ..models import Role, User
-
-
-class NameForm(Form):
-    name = StringField('What is your name?', validators=[Required()])
-    submit = SubmitField('Submit')
-
-
-class EditProfileForm(Form):
-    name = StringField('Real name', validators=[Length(0, 64)])
-    location = StringField('Location', validators=[Length(0, 64)])
-    about_me = TextAreaField('About me')
-    submit = SubmitField('Submit')
-
-
-class EditProfileAdminForm(Form):
-    email = StringField('Email', validators=[Required(), Length(1, 64),
-                                             Email()])
-    username = StringField('Username', validators=[
-        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
-                                          'Usernames must have only letters, '
-                                          'numbers, dots or underscores')])
-    confirmed = BooleanField('Confirmed')
-    role = SelectField('Role', coerce=int)
-    name = StringField('Real name', validators=[Length(0, 64)])
-    location = StringField('Location', validators=[Length(0, 64)])
-    about_me = TextAreaField('About me')
-    submit = SubmitField('Submit')
-
-    def __init__(self, user, *args, **kwargs):
-        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
-        self.role.choices = [(role.id, role.name)
-                             for role in Role.query.order_by(Role.name).all()]
-        self.user = user
-
-    def validate_email(self, field):
-        if field.data != self.user.email and \
-                User.query.filter_by(email=field.data).first():
-            raise ValidationError('Email already registered.')
-
-    def validate_username(self, field):
-        if field.data != self.user.username and \
-                User.query.filter_by(username=field.data).first():
-            raise ValidationError('Username already in use.')
-
-
-class PostForm(Form):
-    body = PageDownField("What's on your mind?", validators=[Required()])
-    submit = SubmitField('Submit')
-
-
-class CommentForm(Form):
-    body = StringField('Enter your comment', validators=[Required()])
-    submit = SubmitField('Submit')
diff --git a/old/server/app/main/img_proc_config.py 
b/old/server/app/main/img_proc_config.py deleted file mode 100644 index db124978..00000000 --- a/old/server/app/main/img_proc_config.py +++ /dev/null @@ -1,20 +0,0 @@ -# paths for image processors -import os -from os.path import join - -class ImgProcConfig: - - def __init__(self): - dir_models = '/data_store/apps/dulldream/dnn_models' - - # mask rcnn - self.mask_rcnn_class_config = '/dulldream/src/config/coco_meta.json' - self.mask_rcnn_model = join(dir_models,'tf/mask_rcnn_coco.h5') - - # p2p - self.p2p_ckpts_dir = join(dir_models,'p2p/coco2014_person') - self.p2p_epoch = 'latest' - - # p2p objects only - self.p2p_bg_ckpts_dir = join(dir_models,'p2p/coco2014_objects') - self.p2p_bg_epoch = 'latest' diff --git a/old/server/app/main/paths.py b/old/server/app/main/paths.py deleted file mode 100644 index 69c21627..00000000 --- a/old/server/app/main/paths.py +++ /dev/null @@ -1,19 +0,0 @@ -from flask import current_app as app - -def get_paths(agree): - if agree: - return ( - app.config['UPLOADS'], - app.config['RENDERS'], - app.config['JSON_DIR'], - app.config['UPLOADS_URI'], - app.config['RENDERS_URI'], - ) - else: - return ( - app.config['UPLOADS_PRIVATE'], - app.config['RENDERS_PRIVATE'], - app.config['JSON_PRIVATE_DIR'], - app.config['UPLOADS_PRIVATE_URI'], - app.config['RENDERS_PRIVATE_URI'], - ) diff --git a/old/server/app/main/tasks.py b/old/server/app/main/tasks.py deleted file mode 100644 index 970e6988..00000000 --- a/old/server/app/main/tasks.py +++ /dev/null @@ -1,374 +0,0 @@ -import os -import sys -import time -import datetime -import json -from PIL import Image, ImageFilter -import cv2 as cv -import numpy as np -from . import main, utils -from .. import basemodels -from flask import current_app as app -from .paths import get_paths -celery = basemodels.celery -from celery.utils.log import get_task_logger -celery_logger = get_task_logger(__name__) -import imutils - - -# init image processors -sys.path.append('/dulldream/src/') -from .img_proc_config import ImgProcConfig -from image_processors.mask_rcnn import MaskRCNN -from image_processors.pix2pix import Pix2Pix -from utils import imx -from utils import fiox - - -# initialize image processor -img_proc_config = ImgProcConfig() -p2p = Pix2Pix(img_proc_config.p2p_ckpts_dir,epoch=img_proc_config.p2p_epoch) -p2p_objects = Pix2Pix(img_proc_config.p2p_bg_ckpts_dir,epoch=img_proc_config.p2p_epoch) - -mask_rcnn = MaskRCNN(img_proc_config.mask_rcnn_class_config, - model_path=img_proc_config.mask_rcnn_model) - - -@celery.task(bind=True) -def task_dull(self, uuid_name, agree, mask_rcnn_result): - """Process image and update during""" - celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name)) - - upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(agree) - - files = [] - im = Image.open(os.path.join(upload_dir, uuid_name + '.jpg')).convert('RGB') - #im_np = cv.cvtColor(imx.ensure_np(im),cv.COLOR_RGB2BGR) - im_np = imx.ensure_np(im) - im_np = im_np[:,:,::-1] - im = im.resize((256,256)) - im_np_256 = imutils.resize(im_np,width=256) - - # Add original - fpath = os.path.join(render_dir, uuid_name + '_orig.jpg') - im.save(fpath, 'JPEG', quality=95) - files.append({ - 'title': 'Original', - 'fn': render_uri + uuid_name + '_orig.jpg' - }) - - if mask_rcnn_result['valid']: - # ----------------------------------------------- - # Segment image (processed in views) - # seems to be an error with async celery processor? 
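The deleted tasks.py is built around one pattern: a bound Celery task periodically calls `self.update_state()` with a percent/message/uuid payload while it works, and the `/status/<task_name>/<task_id>` route in views.py (further below) relays that metadata to the browser as JSON for polling. A minimal sketch of that pattern, assuming an already configured Celery app object named `celery` (the deleted module obtains it from `basemodels.celery`):

    # Hedged sketch of the progress-reporting pattern, not code from this diff.
    import time

    @celery.task(bind=True)           # `celery` is assumed to exist, e.g. basemodels.celery
    def example_task(self, uuid_name):
        steps = ['Segmenting image', 'Running GAN', 'Compositing']
        for i, msg in enumerate(steps):
            self.update_state(
                state='PROCESSING',
                meta={'percent': int(100 * i / len(steps)),
                      'message': msg,
                      'uuid': uuid_name})
            time.sleep(1)             # stand-in for the real work
        return {'percent': 100, 'state': 'complete', 'uuid': uuid_name}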
- # ----------------------------------------------- - - # parse mrcnn data - im_mask = cv.imread(mask_rcnn_result['fp_im_mask']) - seg_mask = cv.imread(mask_rcnn_result['fp_seg_mask']) - #score = mask_rcnn_result['score'] - #name = mask_rcnn_result['name'] - #color = mask_rcnn_result['color'] - files.append({ - 'title': 'Semantic Segmentation', - 'fn': render_uri + uuid_name + '_seg_mask.jpg' - }) - files.append({ - 'title': 'Semantic Segmentation Isolate', - 'fn': render_uri + uuid_name + '_im_mask.jpg' - }) - - - # ----------------------------------------------- - # run rag generator - # ----------------------------------------------- - - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.50, - 'message': 'Applying Region Adjacency Graph', - 'uuid': uuid_name - }) - - # save the regions adjancency graph - im_rag = imx.create_rag_mean(im_mask,compactness=30,n_segments=128) - fpath = os.path.join(render_dir, uuid_name + '_rgraph.jpg') - imx.save_np_as_pil(fpath,im_rag,quality=95) - files.append({ - 'title': 'Region Adjacency Graph', - 'fn': render_uri + uuid_name + '_rgraph.jpg' - }) - - - # ----------------------------------------------- - # generate p2p fake - # ----------------------------------------------- - - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.75, - 'message': 'Running generative adversarial network...', - 'uuid': uuid_name - }) - - - # convert segmentation to mask - seg_mask_gray = cv.cvtColor(seg_mask,cv.COLOR_BGR2GRAY) - seg_mask_gray[seg_mask_gray > 1] = 255 - - # find best P2P fit - ims_p2p = [] - match_amts = [] - iters = 15 - for i in range(0,iters): - im_p2p = p2p.create_p2p(im_rag) - ims_p2p.append(im_p2p) - im_p2p_mask = cv.cvtColor(im_p2p,cv.COLOR_RGB2GRAY) - im_p2p_mask[im_p2p_mask > 1] = 255 - # find where masks intersect - matches = np.bitwise_and(im_p2p_mask,seg_mask_gray) - amt = len(np.where(matches == 255)[0]) - match_amts.append(amt) - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.75, - 'message': 'Generating ({}/{})'.format(i,iters), - 'uuid': uuid_name - }) - - best_idx = np.argmax(match_amts) - im_p2p = ims_p2p[best_idx] - - fpath = os.path.join(render_dir, uuid_name + '_gan.jpg') - imx.save_np_as_pil(fpath,im_p2p,quality=95) - files.append({ - 'title': 'Generative Adversarial Network', - 'fn': render_uri + uuid_name + '_gan.jpg' - }) - - - # ----------------------------------------------- - # generate p2p fake - # ----------------------------------------------- - - # announce to user - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.90, - 'message': 'Compositing images...', - 'uuid': uuid_name - }) - - - # apply masked cloning - im_p2p_gray = cv.cvtColor(im_p2p,cv.COLOR_BGR2GRAY) - im_clone_mask = np.zeros_like(im_p2p_gray,dtype=np.uint8) - im_clone_mask[im_p2p_gray > 1] = 255 - - - # apply smoothed copy+paste clone - im_blur_mask = np.zeros(im_np_256.shape[:2],dtype=np.float64) - im_blur_mask[im_p2p_gray > 1] = 1.0 - im_blur_mask = np.array([im_blur_mask,im_blur_mask,im_blur_mask]).transpose((1,2,0)) - - # erode mask to remove black border - kernel = np.ones((3,3),np.uint8) - im_blur_mask = cv.erode(im_blur_mask,kernel,iterations = 3) - - # feather mask - feather_amt = (3,3) - im_blur_mask = (cv.GaussianBlur(im_blur_mask,feather_amt, 0) > 0) * 1.0 #? 
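The compositing code above and its continuation below reduce to one idea: build a soft alpha mask from the GAN output, erode it to drop the black border, feather the edge with a Gaussian blur, then blend the generated foreground over the original photo. A condensed sketch of that feathering and blending, with illustrative names (`feathered_composite` is not a function in this codebase):

    # Hedged sketch approximating the erode -> blur -> blend steps in the deleted task.
    import cv2 as cv
    import numpy as np

    def feathered_composite(fg, bg, mask, erode_iter=3, blur_ksize=(3, 3)):
        """Blend fg over bg using a feathered (soft-edged) binary mask.
        fg and bg are HxWx3 uint8 images; mask is HxW, nonzero where fg shows."""
        alpha = (mask > 0).astype(np.float64)          # 1.0 where the foreground should show
        alpha = np.dstack([alpha] * 3)                 # broadcast to 3 channels
        kernel = np.ones((3, 3), np.uint8)
        alpha = cv.erode(alpha, kernel, iterations=erode_iter)   # shrink to remove the border halo
        alpha = cv.GaussianBlur(alpha, blur_ksize, 0)            # feather the edge
        alpha = np.clip(alpha, 0.0, 1.0)
        out = bg.astype(np.float64) * (1.0 - alpha) + fg.astype(np.float64) * alpha
        return out.astype(np.uint8)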
- im_blur_mask = cv.GaussianBlur(im_blur_mask,feather_amt, 0) - im_blur_mask = np.clip(im_blur_mask,0.0,1.0) - - # mask p2p fg --> photo bg - im_dull = im_np_256.astype(np.float64) * (1.0 - im_blur_mask) + im_p2p.astype(np.float64) * im_blur_mask - im_dull = im_dull.astype(np.uint8) - - - else: - print('No person. Apply background P2P') - celery_logger.debug('No person. Apply background P2P, uuid: {}'.format(uuid_name)) - im_bg_blur = cv.GaussianBlur(im_np_256,(31,31),0) - im_bg_rag = imx.create_rag_mean(im_bg_blur,compactness=30,n_segments=64) - - # apply gan - im_dull = p2p_objects.create_p2p(im_bg_rag) - - # resize back to full 512px - im_dull_512 = imutils.resize(im_dull,width=512) - - # save dulldream image - fpath = os.path.join(render_dir, uuid_name + '_dull.jpg') - imx.save_np_as_pil(fpath,im_dull_512,quality=95) - files.append({ - 'title': 'Your DullDream', - 'fn': render_uri + uuid_name + '_dull.jpg' - }) - - - # ----------------------------------------------- - # Write data to disk - # ----------------------------------------------- - - data = { - 'uuid': uuid_name, - 'date': str(datetime.datetime.now()), - 'files': files - } - - json_path = os.path.join(json_dir, uuid_name + '.json') - with open(json_path, 'w') as json_file: - json.dump(data, json_file) - - return { - 'percent': 100, - 'state': 'complete', - 'uuid': uuid_name - } - - - - -@celery.task(bind=True) -def blur_task(self, uuid_name, agree, extra): - """Process image and update during""" - celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name)) - - upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(agree) - - files = [] - - im = Image.open(os.path.join(upload_dir, uuid_name + '.jpg')).convert('RGB') - im = im.resize((256,256)) - files.append({ - 'title': 'Original image', - 'fn': upload_uri + uuid_name + '.jpg' - }) - - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.25, - 'message': 'Applying blur', - 'uuid': uuid_name - }) - - im_np = utils.ensure_np(im) - im_blur = cv.blur(im_np, (5,5), 1.0) - im_blur_pil = utils.ensure_pil(im_blur) - - fn = uuid_name + '_blur.jpg' - fpath = os.path.join(render_dir, fn) - im_blur_pil.save(fpath, 'JPEG', quality=95) - - files.append({ - 'title': 'Blurred image', - 'fn': render_uri + uuid_name + '_blur.jpg' - }) - - time.sleep(3) - - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.50, - 'message': 'Sleeping for some reason', - 'uuid': uuid_name - }) - time.sleep(2) - - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.75, - 'message': 'Sleeping some more', - 'uuid': uuid_name - }) - time.sleep(2) - - data = { - 'uuid': uuid_name, - 'date': str(datetime.datetime.now()), - 'files': files - } - - json_path = os.path.join(json_dir, uuid_name + '.json') - with open(json_path, 'w') as json_file: - json.dump(data, json_file) - - celery_logger.debug('ok') - - return { - 'percent': 100, - 'state': 'complete', - 'uuid': uuid_name, - } - -@celery.task(bind=True) -def sleep_task(self, uuid_name): - celery_logger.debug('sleep_task'.format(uuid_name)) - msgs = [ - {'msg':'Uploaded OK','time':.1}, - {'msg':'Segmenting Image...','time':2}, - {'msg':'Found: Person, Horse','time':1}, - {'msg':'Creating Pix2Pix','time':2} - ] - for i,m in enumerate(msgs): - percent = int(float(i)/float(len(msgs))*100.0) - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': percent, - 'message': m['msg'], - 'uuid': uuid_name - }) - celery_logger.debug(m['msg']) - time.sleep(m['time']) - - return { - 'percent': 
100, - 'state': 'complete', - 'uuid': uuid_name - } - -def make_task_json(): - dropdown = {} - for k,v in task_lookup.items(): - if 'active' not in v or v['active'] is not False: - is_default = 'default' in v and v['default'] is True - task = { - 'name': k, - 'title': v['title'], - 'selected': is_default, - } - dropdown[k] = task - return json.dumps(dropdown) - -# Add all valid tasks to this lookup. -# Set 'active': False to disable a task -# Set 'default': True to define the default task - -task_lookup = { - 'sleep': { - 'title': 'Sleep Test', - 'task': sleep_task, - 'active': False - }, - 'blur': { - 'title': 'Blur', - 'task': blur_task, - 'active': False - }, - 'task_dull': { - 'title': 'DullDream V2', - 'task': task_dull, - 'active': True, - 'default': True - } -} - diff --git a/old/server/app/main/utils.py b/old/server/app/main/utils.py deleted file mode 100644 index 62d9c95f..00000000 --- a/old/server/app/main/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -from flask import current_app as app -from PIL import Image -import numpy as np -import cv2 as cv -import os -from os.path import join - -def ensure_pil(im): - try: - im.verify() - return im - except: - return Image.fromarray(im.astype('uint8'), 'RGB') - -def ensure_np(im): - if type(im) == np.ndarray: - return im - return np.asarray(im, np.uint8) - -def get_recent_uploads(limit=10): - d_uploads = app.config['UPLOADS'] - d_renders = app.config['RENDERS'] - - # list all files in uploads dir - filenames = [s for s in os.listdir(d_uploads) - if os.path.isfile(os.path.join(d_uploads, s))] - # sort uploaded files by date - filenames.sort(key=lambda s: os.path.getmtime(os.path.join(d_uploads, s)),reverse=True) - basenames = [os.path.splitext(os.path.basename(f))[0] for f in filenames] - basenames = basenames[:limit] - filenames = [f for f in basenames if os.path.isfile(join(d_renders,'{}_dull.jpg'.format(f)))] - - # create list for uploads and renders - uploads = [join('/img/uploads',f) for f in filenames] - renders = [join('/img/renders','{}_dull'.format(f)) for f in filenames] - urls = [join('/d',f) for f in basenames] - return uploads, renders, urls diff --git a/old/server/app/main/views.py b/old/server/app/main/views.py deleted file mode 100644 index 11a8ca53..00000000 --- a/old/server/app/main/views.py +++ /dev/null @@ -1,300 +0,0 @@ -import os -import uuid -import json -from flask import render_template, redirect, url_for, send_from_directory -from flask import request, make_response, jsonify -from . 
import main, utils -from .tasks import task_lookup, make_task_json -from PIL import Image, ImageOps -import cv2 as cv - -from .paths import get_paths - -from flask import current_app as app -from werkzeug.utils import secure_filename -import imutils - -# ------------------------------------------------------------ -# Temp: run mask rcnn outside celery -# ------------------------------------------------------------ - -# init image processors -import sys -from .img_proc_config import ImgProcConfig -sys.path.append('/dulldream/src/') -from image_processors.mask_rcnn import MaskRCNN -from utils import imx -from utils import fiox - -img_proc_congif = ImgProcConfig() -mask_rcnn = MaskRCNN(img_proc_congif.mask_rcnn_class_config, - model_path=img_proc_congif.mask_rcnn_model) - -# ------------------------------------------------------------ -# Tasks -# ------------------------------------------------------------ - -@main.route('/status/<task_name>/<task_id>') -def task_status(task_name, task_id): - """Return celery image processing status""" - if task_name in task_lookup: - task = task_lookup[task_name]['task'].AsyncResult(task_id) - else: - return jsonify({ - 'state': 'error', - 'percent': 100, - 'message': 'Unknown task' - }) - - app.logger.info('task state: {}'.format(task.state)) - if task.state == 'PENDING': - response = { - 'state': task.state, - 'percent': 0, - 'message': 'Pending...' - } - elif task.state != 'FAILURE': - response = { - 'state': task.state, - 'percent': task.info.get('percent', 0), - 'uuid': task.info.get('uuid', 0), - 'message': task.info.get('message', '') - } - if 'result' in task.info: - response['result'] = task.info['result'] - else: - # something went wrong in the background job - response = { - 'state': task.state, - 'percent': 100, - 'message': str(task.info), # this is the exception raised - } - return jsonify(response) - -# ------------------------------------------------------------ -# POST Routes -# ------------------------------------------------------------ - -@main.route('/upload/sleep', methods=['GET', 'POST']) -def sleep_test(): - async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test']) - task_url = url_for('main.task_status', task_name='sleep', task_id=async_task.id) - return jsonify({ - 'result': True, - 'task_url': task_url, - }) - -@main.route('/upload', methods=['POST']) -def upload(): - - style = request.form['style'] - print('style',style) - if style in task_lookup: - task = task_lookup[style]['task'] - print('task',task) - else: - return jsonify({ - 'result': False, - 'error': 'Unknown task', - }) - - file = request.files['user_image'] - agree = bool(request.form['agree']) - ext = request.form['ext'] - if ext is None: - ext = request.files['ext'] - - uuid_name = str(uuid.uuid4()) - - app.logger.info('[+] style: {}'.format(style)) - app.logger.info('[+] ext: {}'.format(ext)) - app.logger.info('[+] uuid_name: {}'.format(uuid_name)) - app.logger.info('[+] agreed: {}'.format(agree)) - - # convert PNG to JPG - print('[+] Resizing image') - - # LOL MaskRCNN needs to be run outside of the Celery Task - im = Image.open(file.stream).convert('RGB') - im = ImageOps.fit(im,(512,512)) - if agree: - upload_folder = app.config['UPLOADS'] - else: - upload_folder = app.config['UPLOADS_PRIVATE'] - - fpath = os.path.join(upload_folder, uuid_name + '.jpg') - - # Save image to disk - print('[+] Save image to {}'.format(fpath)) - im.save(fpath, 'JPEG', quality=100) - im_pil_256 = im.resize((256,256)) - - print('[+] ensure_np...') - im_np = 
imx.ensure_np(im_pil_256) - #print('[+] resize np...') - #im_np = imutils.resize(im_np,width=256) - - upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(agree) - - print('[+] Run mrcnn...') - try: - result = mask_rcnn.create_segmentations(im_np,concat=True) - except: - print('[-] Error. Could not run mask_rcnn') - result = [] - - if len(result) > 0: - result = result[0] - - # save data, then pass to celery task - print('[+] Save masks') - seg_mask = result['seg_mask'] - fpath_seg_mask = os.path.join(render_dir, uuid_name + '_seg_mask.jpg') - #cv.imwrite(fpath_seg_mask,cv.cvtColor(seg_mask,cv.COLOR_BGR2RGB)) - #seg_mask = seg_mask[:,:,::-1] - seg_mask_pil = imx.ensure_pil(seg_mask) - seg_mask_pil.save(fpath_seg_mask, 'JPEG', quality=100) - - im_mask = result['im_mask'] - fpath_im_mask = os.path.join(render_dir, uuid_name + '_im_mask.jpg') - #im_mask = im_mask[:,:,::-1] - im_mask_pil = imx.ensure_pil(im_mask) - im_mask_pil.save(fpath_im_mask, 'JPEG',quality=100) - #cv.imwrite(fpath_im_mask,cv.cvtColor(im_mask,cv.COLOR_BGR2RGB)) - - celery_result = { - 'score':str(result['score']), - 'name':str(result['name']), - 'class_index':str(result['class_index']), - 'color':str(result['color']), - 'fp_im_mask':fpath_im_mask, - 'fp_seg_mask':fpath_seg_mask, - 'valid':True - } - else: - print('[-] no reults. process background only') - celery_result = { - 'score':None, - 'name':None, - 'class_index':None, - 'color':None, - 'fp_im_mask':None, - 'fp_seg_mask':None, - 'valid':False - } - - print('[+] Start celery') - async_task = task.apply_async(args=[uuid_name, agree, celery_result]) - task_url = url_for('main.task_status', task_name=style, task_id=async_task.id) - - return jsonify({ - 'result': True, - 'task_url': task_url, - 'uuid': uuid_name - }) - - - -# ---------------------------------------------------- -# Fileserver, temp solution -# ---------------------------------------------------- - -@main.route('/img/<string:imtype>/<string:uuid_name>') -def get_image(imtype,uuid_name): - """Return image files from render or uploads""" - if imtype == 'uploads': - d = app.config['UPLOADS'] - suffix = '' - elif imtype == 'renders': - d = app.config['RENDERS'] - suffix = '' - elif imtype == 'fcn': - d = app.config['RENDERS'] - suffix = '_fcn8' - - fname = uuid_name + suffix + '.jpg' - fpath = os.path.join(d, fname) - - if os.path.isfile(fpath): - return send_from_directory(d,fname) - else: - return send_from_directory('static', 'img/404.jpg') - -# ---------------------------------------------------- -# Deleting images -# ---------------------------------------------------- - -def destroy_data(uuid_name, is_public): - uri_base = app.config['URI_BASE'] - upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(is_public) - - json_path = os.path.join(json_dir, uuid_name + '.json') - with open(json_path) as json_file: - data = json.load(json_file) - for f in data['files']: - path = os.path.join(uri_base, f['fn'][1:]) - if os.path.exists(path): - os.remove(path) - os.remove(json_path) - -@main.route('/d/<uuid_name>/destroy', strict_slashes=False) # public -def route_public_destroy(uuid_name): - destroy_data(uuid_name, True) - return redirect("/", code=302) - -@main.route('/p/<uuid_name>/destroy', strict_slashes=False) # private -def route_private_destroy(uuid_name): - destroy_data(uuid_name, False) - return redirect("/", code=302) - -# ---------------------------------------------------- -# Static routes -# ---------------------------------------------------- - -# Most of the pages are 
served with the single page app in index.html:
-
-task_json = make_task_json()
-
-@main.route('/', strict_slashes=False)
-def index():
-    return render_template('index.html', task_json=task_json)
-
-@main.route('/about', strict_slashes=False)
-def about():
-    return render_template('index.html', task_json=task_json)
-
-@main.route('/d/<uuid_name>', strict_slashes=False) # public
-def route_public(uuid_name):
-    return render_template('index.html', task_json=task_json)
-
-@main.route('/p/<uuid_name>', strict_slashes=False) # private
-def route_private(uuid_name):
-    return render_template('index.html', task_json=task_json)
-
-@main.route('/privacy', strict_slashes=False)
-def privacy():
-    return render_template('index.html', task_json=task_json)
-
-# Some of the pages have their own static file:
-
-@main.route('/gallery', strict_slashes = False)
-def gallery():
-    app.logger.info('access gallery')
-    uploads, renders, urls = utils.get_recent_uploads(limit=50)
-    uuids = [os.path.splitext(os.path.basename(f))[0] for f in uploads]
-    images = [{'upload':u,'render':r, 'url':url} for u,r,url in zip(uploads,renders,urls)]
-    return render_template('gallery.html',images=images)
-
-@main.route('/zkm', strict_slashes=False)
-def zkm():
-    app.logger.info('access ZkM')
-    return render_template('zkm.html')
-
-@main.route('/celery', strict_slashes=False)
-def celery_route():
-    return render_template('celery.html')
-
-@main.route('/projector', strict_slashes=False)
-def projector():
-    uploads, renders,urls = utils.get_recent_uploads()
-    return render_template('projector.html', uploads=uploads, renders=renders)
diff --git a/old/server/app/static/css/bootstrap.min.css b/old/server/app/static/css/bootstrap.min.css
deleted file mode 100644
index ed3905e0..00000000
--- a/old/server/app/static/css/bootstrap.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v3.3.7 (http://getbootstrap.com)
- * Copyright 2011-2016 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- *//*!
 normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
-[minified Bootstrap 3.3.7 / normalize.css rules omitted]
.checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group 
.form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group 
.input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default 
.btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 
5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 
5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} -/*# sourceMappingURL=bootstrap.min.css.map */
\ No newline at end of file diff --git a/old/server/app/static/css/dullbrown-theme.css b/old/server/app/static/css/dullbrown-theme.css deleted file mode 100644 index 98aff038..00000000 --- a/old/server/app/static/css/dullbrown-theme.css +++ /dev/null @@ -1,502 +0,0 @@ -* { box-sizing: border-box; } -html { - margin: 0; padding: 0; - width: 100%; height: 100%; -} -body { - margin: 0; padding: 0; - width: 100%; height: 100%; - font-family: Helvetica, sans-serif; -} -body, .modal, #footer { - /* Permalink - use to edit and share this gradient: http://colorzilla.com/gradient-editor/#a5ce3e+0,ffffff+50,a5ce3e+100 */ - background: #7B7568; /* Old browsers */ - background: -moz-linear-gradient(left, #7B7568 0%, #ffffff 50%, #7B7568 100%); /* FF3.6-15 */ - background: -webkit-linear-gradient(left, #7B7568 0%,#ffffff 50%,#7B7568 100%); /* Chrome10-25,Safari5.1-6 */ - background: linear-gradient(to right, #7B7568 0%,#ffffff 50%,#7B7568 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */ - filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#7B7568', endColorstr='#a5ce3e',GradientType=1 ); /* IE6-9 */ -} - -/* ------------------------------------------------ */ -/* navbar */ -.navbar-default a.navbar-brand{ -} -.navbar-default{ - background: transparent; -} -.navbar{ - margin-bottom: 0; - border:0; -} - -.navbar-default .navbar-brand a{ - color:#ccc; -} -.navbar-default a.navbar-brand{ - color:#ccc; -} -.navbar-default a.navbar-brand:hover{ - color:#fff; -} - -/* Hamburger */ -.navbar-default .navbar-toggle{ - color:#ccc; -} -.navbar-default .navbar-toggle .icon-bar{ - color:#ccc; -} -.navbar-default .navbar-toggle .icon-bar:hover{ - color:#fff; -} -.navbar-default .navbar-toggle:focus, .navbar-default .navbar-toggle:hover{ - color:#fff; - background: transparent; -} -.navbar{ - border-radius: 0px; - min-height:30px; -} -.navbar-default .navbar-text{ - color:#ccc; -} -.navbar-default .navbar-nav>li>a{ - color:#ccc; -} -.navbar-default .navbar-nav>li>a:hover{ - color:#fff; -} -.navbar-default .navbar-toggle .icon-bar{ - background-color:#ccc; -} -.navbar-default .navbar-toggle:hover .icon-bar{ - background-color: #eee; -} -.navbar-default .navbar-toggle:hover { - border-color: #fff; -} -.navbar-default .navbar-collapse, .navbar-default .navbar-form{ - border:0; -} - -/* ------------------------------------------------ */ -/* Jumbotron */ -.jumbotron { - padding-top: 0px; - padding-bottom: 0px; - margin-bottom: 0px; - color: inherit; -} -.jumbotron{ - background: transparent; - color:black; -} -.jumbotron h1{ - color:#ddd; - margin-bottom:0px; -} -.jumbotron a.btn-primary{ - background:#ddd; - color:#333; -} -.jumbotron a.btn-primary:hover{ - background:#eee; - color:#222; -} -.jumbotron p > a.jcallout{ - color:#eee; - padding-bottom: 3px; - border-bottom:1px dotted; - text-decoration: none; -} -.jumbotron p > a.jcallout:hover{ - color:#fff; - border-bottom:1px solid #ccc; - text-decoration: none; -} -.jumbotron a.btn-default{ - color:#eee; - border:1px solid #eee; - background: transparent; -} -.jumbotron a.btn-default:hover{ - background: #22f; - border:1px solid #ccc; -} -.jumbotron a.btn-default:active{ - color:#eee; - border:1px solid #ccc; -} - -/* Input button override --------------------------------------------------- */ -/*input[type="file"] { - display: none; -} -input[type="button"] { - display: none; -}*/ -input.hidden_input{ - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - opacity: 0; -} - -/* Global styles 
--------------------------------------------------- */ -h1,h2,h3,h4,h5,h6 { - font-weight: bold; - font-style: italic; - text-align: center; -} -h1 { - font-size:56px; - margin-top: 20px; - margin-bottom: 0; -} -h2 { - font-size:20px; - margin-top: 0; - margin-bottom: 24px; -} -ul { - list-style: none; - margin:0; - padding:0; -} -li { - display: inline-block; -} -img.img_responsive_dull { - max-width: 100%; - height: auto; -} -#photo_area{ - width: 512px; - height: 512px; - max-width: 97vw; - max-height: 97vw; - min-width: 240px; - min-height: 240px; - margin:0 auto; - text-align: center; -} -.dash_border{ - background-color: rgba(255,255,255,.2); - border:1px dashed #000; -} - -label { - display: block; -} - -div.center_inner { - position: relative; - top: 50%; - -webkit-transform: translateY(-50%); - -ms-transform: translateY(-50%); - transform: translateY(-50%); -} -#upload_controls{ - margin-top:25px; - display: none; -} -#restart_btn, #rotate_btn, #upload_btn, #dropdown_btn { - display: inline-block; - margin-left:5px; - margin-right:5px; -} -.custom-file-upload { - display: inline-block; - padding: 6px 12px; - cursor: pointer; -} - -.align_center{ - text-align: center; -} - -ul.action-buttons{ - margin-top: 40px; - list-style: none; - margin-left: 0; - padding-left:0; -} -li { - list-style: none; - margin-left: 0; - padding-left:0; - /*margin-bottom:20px;*/ -} - -.btn { - display: inline-block; - color: #333; - background-color: #fff; - border: 1px solid #adadad; - font-family: Helvetica, sans-serif; - padding: 6px 12px; - margin: 0; - font-size: 14px; - font-weight: 400; - line-height: 1.42857143; - text-align: center; - white-space: nowrap; - vertical-align: middle; - -ms-touch-action: manipulation; - touch-action: manipulation; - cursor: pointer; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; - border-radius: 4px; - text-decoration: none; - transition: all 0.15s; -} -.desktop .btn:hover { - background-color: #fff; -} -.btn.btn-lg { - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -.btn.btn-sm { - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn.btn-important { - border-width: 2px; - border-color: #444; -} -#photo_area { - position: relative; - cursor: pointer; -} -#restart_btn { - position: relative; - cursor: pointer; -} -input[type=file] { - cursor: pointer; - opacity: 0; - position: absolute; - top: 0; left: 0; - width: 100%; height: 100%; -} -.desktop #photo_area .btn { - background: #fff; -} -.desktop #photo_area .btn:hover { - background: #eee; -} -.consent_box { - margin-top: 10px; - font-size: smaller; - color: #444; -} - -/* Intro page --------------------------------------------------- */ -canvas { - display: block; -} -.photo { - display: none; - width:100%; - height:100%; -} - -/* form visibility */ - -#preloader_anim { - display: none; -} -#about_btn { - margin: 20px 0px 80px 0; -} -#share_btns { - display: none; - margin:20px; -} -.notice { - color: #444; - font-size: small; -} -a.btn-default { - background-color: transparent; -} -a.btn-default:hover { - color: #333; - background-color: #fff; - border-color: #adadad; -} -.debug-view { - margin-bottom: 20px; - font-size:14px; - color:#333; -} -.debug-view img { - margin-bottom:4px; -} -#full_results, #hide_more { - display: none; -} - -select { - display: inline-block; - height: 34px; - padding: 6px 12px; - font-size: 14px; - line-height: 1.428571429; - color: #555; - 
vertical-align: middle; - background-color: #eee; - background-image: none; - border: 1px solid #bbb; - border-radius: 4px; - transition: all .15s; - cursor: pointer; -} -.desktop select:hover { - background-color: #fff; - cursor: pointer; -} - -/* About ---------------------------------------------------- */ -.modal { - pointer-events: none; - opacity: 0; - width: 100%; - height: 100%; - position: fixed; - top: 0; left: 0; - transition: all 0.2s; -} -.modal.visible { - pointer-events: auto; - opacity: 1; -} -.modal p { - font-size: 16px; - line-height: 24px; -} -.modal .inner { - margin: 10vh auto; - background: rgba(255,255,255,0.5); - padding: 20px 20px 40px 20px; - width: 600px; - max-width: 90vw; -} -.modal .content { - margin-bottom: 20px; -} - -/* Result ---------------------------------------------------- */ -.result_view { - display: none; - text-align: center; -} -.final_result img { - width: 512px; - height: 512px; - border: 1px dashed #000; - margin: 10px; -} -.all_results { - display: none; -} -.all_results div { - margin-bottom: 20px; -} -.all_results img { - width: 384px; - height: 384px; - margin: 10px; -} - -.made_with { - margin-bottom: 10px; -} -#delete_btns { - margin-top: 10px; - font-size: 10px; -} -a#destroy_data { - color: #888; -} -.desktop a#destroy_data:hover { - color: #f00; -} - -/* Footer ---------------------------------------------------- */ -#footer{ - /*background: #ddd;*/ - /*padding: 20px 0;*/ - /*position: fixed;*/ - bottom: 0; - width: 100%; - text-align: center; - padding-top: 40px; - padding-bottom: 20px; -} -#footer ul li a { - font-weight: bold; - text-decoration: none; -} -#footer a { - color:#333; - text-decoration: underline; -} -#footer a:hover { - color:#111; -} - -@media screen and (max-width: 500px) { - .modal { - width: 100vw; - height: 100vh; - display: flex; - align-items: center; - justify-content: center; - } - .modal .inner { - margin: 0; - width: 100vw; - height: 100vh; - max-width: 100vw; - } -} - - -/* Gallery */ - -.gallery-preview-row{ - margin-bottom: 40px -} -.gallery-preview-row img{ - width:100%; - padding:2px; -} - -@media (max-width: 600px) { - #preloader_anim { - position: relative; - top: -50px; - } -} - -/*hide dropdown*/ -#dropdown_btn{ - display: none; -}
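
Worth noting how the theme's modal works: the .modal rules above keep the dialog mounted but invisible (opacity 0, pointer-events none) and the visible class fades it in over the 0.2s transition, which is exactly what the deleted app.js further down in this diff does with jQuery's addClass/removeClass. A minimal framework-free sketch of the same pattern; the '.about_view' selector is only an example:

    // Show/hide any element styled with the .modal / .modal.visible rules above.
    // '.about_view' is an example selector; the real page toggled the same class with jQuery.
    function showModal(selector) {
      var modal = document.querySelector(selector)
      if (modal) modal.classList.add('visible')    // opacity 0 -> 1, clicks re-enabled
    }
    function hideModal(selector) {
      var modal = document.querySelector(selector)
      if (modal) modal.classList.remove('visible') // the 0.2s transition fades it back out
    }
    // showModal('.about_view')
    // hideModal('.about_view')
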
\ No newline at end of file
diff --git a/old/server/app/static/css/projector.css b/old/server/app/static/css/projector.css
deleted file mode 100644
index 401f0dff..00000000
--- a/old/server/app/static/css/projector.css
+++ /dev/null
@@ -1,52 +0,0 @@
-html, body, #wrapper {
- height:100%;
- width: 100%;
- margin: 0;
- padding: 0;
- border: 0;
- background-color: #000;
-}
-table{
- padding:0;
- margin:0;
- border-spacing: 0;
-}
-table td{
- padding:0;
- margin:0;
-}
-#wrapper td {
- vertical-align: middle;
- text-align: center;
-}
-#wrapper img{
- margin-top:-350px;
- margin-left:-350px;
-}
-.left{
- background-color: #000;
-}
-.right{
- background-color: #000;;
-}
-
-#container{
-}
-#container-left{
- width:50%;
- float:left;
- position: relative;
-}
-#container-right{
- width:50%;
- float:right;
- position: relative;
-}
-
-.cycle{position:relative;display: none;}
-.cycle img{position:absolute;z-index:1}
-.cycle img.active{z-index:100}
-.cycle img{
- width:700px;
- height:700px;
-}
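
The .cycle rules above (projector.css) stack all of the slideshow images on top of each other and simply raise whichever img carries the active class (z-index 100 vs 1). The script that drove this display is not part of this diff, so the driver below is only a plausible sketch; the startCycle name and the 4000 ms interval are assumptions:

    // Rotate the 'active' class through the images of a .cycle container.
    // startCycle and the 4000 ms default interval are assumptions, not code from the repo.
    function startCycle(selector, intervalMs) {
      var container = document.querySelector(selector)
      if (!container) return
      var imgs = container.querySelectorAll('img')
      if (!imgs.length) return
      var current = 0
      container.style.display = 'block'   // .cycle is display:none until a script takes over
      imgs[current].classList.add('active')
      setInterval(function () {
        imgs[current].classList.remove('active')
        current = (current + 1) % imgs.length
        imgs[current].classList.add('active')
      }, intervalMs || 4000)
    }
    // startCycle('.cycle', 4000)
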
\ No newline at end of file diff --git a/old/server/app/static/js/app.js b/old/server/app/static/js/app.js deleted file mode 100644 index 454d5c37..00000000 --- a/old/server/app/static/js/app.js +++ /dev/null @@ -1,158 +0,0 @@ -var app = (function(){ - - var app = {} - - app.init = function(){ - upload.init() - app.bind() - app.build() - app.resize() - app.route() - } - app.bind = function(){ - $(window).on('resize', app.resize) - $(".about_button").on('click', app.about_show) - $(".privacy_button").on('click', app.privacy_show) - $(".modal").on('click', app.modal_hide) - $(".modal .btn").on('click', app.modal_hide) - $(".modal .inner").on('click', preventDefault) - $("#destroy_data").on('click', app.destroyData) - $("#show_all_results").on('click', app.showAllResults) - } - app.build = function(){ - var items = JSON.parse(decodeEntities($("#dropdown_options").html())) - var $dropdown = $("#dropdown") - var options = Object.keys(items).sort().map(key => { - var item = items[key] - var option = document.createElement('option') - option.value = item.name - option.innerHTML = item.title - if (item.selected) option.selected = true - $dropdown.append(option) - }) - var loader = new Image () - loader.src = '/static/img/loader.gif' - } - app.resize = function(){ - var $el = $('#photo_area') - var w = $el.width() - $el.height($el.width()) - } - app.route = function(){ - const path = window.location.pathname.split('/') - path.shift() - switch (path[0]) { - case 'd': - app.processingComplete(path[1], true) // public - break - case 'p': - app.processingComplete(path[1], false) // private - break - case 'about': - app.about_show() - break - case 'privacy': - app.privacy_show() - break - default: - // load index, default state - break - } - } - - /* upload UI changes */ - - app.didPickPhoto = function(){ - $('#upload_controls').fadeIn() - $('#user_photo_canvas').show() - $('#take_photo_btn').hide() - } - app.didClickUpload = function(){ - $('#upload_controls').slideUp('fast') - $('#user_photo_canvas').hide() - $('#preloader_anim').fadeIn('fast') - $('#progress').fadeIn() - } - app.uploadDidComplete = function(){ - $('#preloader_anim').hide() - $('#progress').hide() - } - app.uploadDidComplete = function(){ - // $('#preloader_anim').hide() - // $('#progress').hide() - } - app.updateProgress = function(message, percentage){ - message = message || "Processing..." - percentage = percentage || 0 - $("#progress").html(message) - } - app.processingComplete = function(uuid, is_public){ - $('#preloader_anim').hide() - $('#progress').hide() - // - $("header h2").html("Your dull result") - $(".upload_view").hide() - $(".results_view").show() - var endpoint = is_public ? 
'json' : 'json_private' - $.getJSON('/static/media/' + endpoint + '/' + uuid + '.json', function(data){ - console.log(data) - var template = $("#result_template").html() - var final_result = new Image - final_result.src = data.files[data.files.length-1].fn - $(".final_result").empty() - $(".all_results").empty() - $(".final_result").append(final_result) - data.files.forEach(function(file){ - var t = template.replace(/{img}/, file.fn).replace(/{title}/, file.title) - $(".all_results").append(t) - }) - $(".result_view").show() - $(".permalink").attr('href', window.location.href) - }).fail(function(){ - console.log('error fetching json') - window.location.href = '/' - }) - } - var detailed = false - app.showAllResults = function(){ - if (!detailed) { - detailed = true - $(this).html('Hide') - $(".all_results").fadeIn('fast') - } else { - detailed = false - $(this).html('Detailed Analysis') - $(".all_results").slideUp('fast') - } - } - app.destroyData = function(){ - var uuid = window.location.pathname.split('/')[2] - var confirmed = confirm("Do you really want to delete your dull dream?") - if (confirmed) { - $.get( [window.location.pathname, 'destroy'].join('/').replace('//', '/') ).always(function(){ - alert('Dull dream deleted!') - window.location.href = '/' - }) - } - } - - /* modals */ - - app.about_show = function(e){ - e.preventDefault() - $(".about_view").addClass('visible') - } - app.privacy_show = function(e){ - e.preventDefault() - $(".privacy_view").addClass('visible') - } - app.modal_hide = function(e){ - e.preventDefault() - e.stopPropagation() - $(".modal").removeClass('visible') - } - - document.addEventListener('DOMContentLoaded', app.init) - - return app -})()
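
One behavioural detail in the deleted processingComplete above: template.replace(/{img}/, ...) and .replace(/{title}/, ...) use non-global regexes, so only the first occurrence of each placeholder in #result_template is substituted. A standalone illustration; the template markup here is hypothetical, not the real #result_template:

    // Non-global regexes replace only the first match, which is what app.js relied on.
    var template = '<div><img src="{img}" alt="{title}"><p>{title}</p></div>' // hypothetical markup
    var html = template
      .replace(/{img}/, 'result.jpg')
      .replace(/{title}/, 'Dull result')
    // html === '<div><img src="result.jpg" alt="Dull result"><p>{title}</p></div>'
    // The second {title} is left untouched; a /{title}/g regex would have replaced both.
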
\ No newline at end of file diff --git a/old/server/app/static/js/upload.js b/old/server/app/static/js/upload.js deleted file mode 100644 index 27437e43..00000000 --- a/old/server/app/static/js/upload.js +++ /dev/null @@ -1,319 +0,0 @@ -var messages = { - is_processing: "Running semantic segmentation...", - upload_failed: "Error attempting to upload the file.", - upload_cancelled: "Upload cancelled or browser dropped connection.", - unable_to_compute: "We're sorry! We were unable to compute your image.", - pending: "Sending to Generative Adversarial Network...", - complete: "Processing complete!", -} - -var upload = (function(){ - var upload = {} - var uploading = false - - var MAX_SIDE = 512 - - upload.init = function(){ - upload.bind() - } - - upload.bind = function(){ - $("input[type=file]").on('change', upload.change) - $("#upload_btn").on('click', upload.go) - document.body.addEventListener("dragover", upload.dragover) - document.body.addEventListener("dragleave", upload.dragover) - document.body.addEventListener("drop", upload.change) - } - - upload.dragover = function(e){ - e.stopPropagation() - e.preventDefault() - } - - upload.change = function(e){ - e.preventDefault() - var files = e.dataTransfer ? e.dataTransfer.files : e.target.files - if (files.length) { - var file = files[files.length - 1] - if (!file.type.match('image.*')) - return - var reader = new FileReader() - reader.onload = onReaderLoad - reader.readAsDataURL(file) - } - function onReaderLoad(e) { - // Don't leak! - reader.onload = null - var img = new Image - img.onload = function(){ - img.onload = null - upload.ready(img) - } - img.src = e.target.result - } - } - - upload.ready = function(img){ - var resized = renderToCanvas(img, { correctOrientation: true }) - var canvas = document.querySelector('#user_photo_canvas') - ctx = canvas.getContext('2d') - ctx.fillStyle = 'black' - ctx.fillRect(0, 0, MAX_SIDE, MAX_SIDE) - var x_offset = (MAX_SIDE - resized.width) / 2 - var y_offset = (MAX_SIDE - resized.height) / 2 - - ctx.drawImage(resized, x_offset, y_offset) - app.didPickPhoto() - } - - upload.go = function(){ - if (uploading) return - uploading = true - app.didClickUpload() - try { - var canvas = document.querySelector('#user_photo_canvas') - var cb = canvas.toBlob(function(blob){ - upload.send(blob) - }, 'image/jpeg', 89) - } catch(e){ - app.updateProgress(messages.unable_to_compute) - } - } - - upload.send = function(blob){ - console.log("sending upload...") - var fd = new FormData() - fd.append('user_image', blob) - fd.append('ext', 'jpg') - fd.append('style', $("#dropdown").val()) - fd.append('agree', $("#agree").val() || 0) - - var xhr = new XMLHttpRequest() - xhr.upload.addEventListener("progress", upload.progress, false) - xhr.addEventListener("load", upload.complete, false) - xhr.addEventListener("error", upload.failed, false) - xhr.addEventListener("abort", upload.canceled, false) - xhr.open("POST", "/upload") - xhr.send(fd) - } - - upload.progress = function (e) { - if (e.lengthComputable) { - var percentComplete = Math.round(e.loaded * 100 / e.total) - if (percentComplete > 99) { - app.updateProgress(messages.is_processing) - } else { - app.updateProgress("Uploaded " + percentComplete.toString() + '%') - } - } - else { - app.updateProgress(messages.unable_to_compute) - } - } - - upload.complete = function (e) { - uploading = false - try { - var data = JSON.parse(e.target.responseText) - } catch (e) { - return app.updateProgress(messages.upload_failed) - } - app.uploadDidComplete() - upload.data = data - 
upload.task_progress(data.task_url) - } - - upload.failed = function (evt) { - uploading = false - app.updateProgress(messages.upload_failed) - } - - upload.cancelled = function (evt) { - uploading = false - app.updateProgress(messages.upload_cancelled) - } - - upload.task_progress = function (status_url) { - var is_public = $("#agree").val() || 0 - var uuid = upload.data.uuid - $.getJSON(status_url, function(data){ - console.log(data) - var alive = true - var delay = 500 - switch(data.state) { - case 'PENDING': - app.updateProgress(messages.pending) - delay = 2000 - break - case 'PROCESSING': - app.updateProgress(data.message, data.percent) - delay = 500 - break - case 'SUCCESS': - app.updateProgress(messages.complete) - if (is_public) { - history.pushState({}, 'DullDream', '/d/' + uuid) - } else { - history.pushState({}, 'DullDream', '/p/' + uuid) - } - app.processingComplete(uuid, is_public) // truthy if private - alive = false - break - default: - // NB: error state - alive = false - break - } - if (alive) { - setTimeout(function() { - upload.task_progress(status_url) - }, delay) - } - }) - } - - - function renderToCanvas(img, options) { - if (!img) return - options = options || {} - - // Canvas max size for any side - var maxSize = MAX_SIDE - var canvas = document.createElement('canvas') - var ctx = canvas.getContext('2d') - var initialScale = options.scale || 1 - // Scale to needed to constrain canvas to max size - var scale = getScale(img.width * initialScale, img.height * initialScale, maxSize, maxSize, true) - // Still need to apply the user defined scale - scale *= initialScale - var width = canvas.width = Math.round(img.width * scale) - var height = canvas.height = Math.round(img.height * scale) - var correctOrientation = options.correctOrientation - var jpeg = !!img.src.match(/data:image\/jpeg|\.jpeg$|\.jpg$/i) - var hasDataURI = !!img.src.match(/^data:/) - - ctx.save() - - // Can only correct orientation on JPEGs represented as dataURIs - // for the time being - if (correctOrientation && jpeg && hasDataURI) { - applyOrientationCorrection(canvas, ctx, img.src) - } - // Resize image if too large - if (scale !== 1) { - ctx.scale(scale, scale) - } - - ctx.drawImage(img, 0, 0) - ctx.restore() - - return canvas - } - - function getScale(width, height, viewportWidth, viewportHeight, fillViewport) { - fillViewport = !!fillViewport - var landscape = (width / height) > (viewportWidth / viewportHeight) - if (landscape) { - if (fillViewport) { - return fitVertical() - } else if (width > viewportWidth) { - return fitHorizontal() - } - } else { - if (fillViewport) { - return fitHorizontal() - } else if (height > viewportHeight) { - return fitVertical() - } - } - return 1 - - function fitHorizontal() { - return viewportWidth / width - } - - function fitVertical() { - return viewportHeight / height - } - } - - function applyOrientationCorrection(canvas, ctx, uri) { - var orientation = getOrientation(uri) - // Only apply transform if there is some non-normal orientation - if (orientation && orientation !== 1) { - var transform = orientationToTransform[orientation] - var rotation = transform.rotation - var mirror = transform.mirror - var flipAspect = rotation === 90 || rotation === 270 - if (flipAspect) { - // Fancy schmancy swap algo - canvas.width = canvas.height + canvas.width - canvas.height = canvas.width - canvas.height - canvas.width -= canvas.height - } - if (rotation > 0) { - applyRotation(canvas, ctx, rotation) - } - } - } - - function applyRotation(canvas, ctx, deg) { - var radians = 
deg * (Math.PI / 180) - if (deg === 90) { - ctx.translate(canvas.width, 0) - } else if (deg === 180) { - ctx.translate(canvas.width, canvas.height) - } else if (deg == 270) { - ctx.translate(0, canvas.height) - } - ctx.rotate(radians) - } - - function getOrientation (uri) { - var exif = new ExifReader - // Split off the base64 data - var base64String = uri.split(',')[1] - // Read off first 128KB, which is all we need to - // get the EXIF data - var arr = base64ToUint8Array(base64String, 0, Math.pow(2, 17)) - try { - exif.load(arr.buffer) - return exif.getTagValue('Orientation') - } catch (err) { - return 1 - } - } - - function base64ToUint8Array(string, start, finish) { - var start = start || 0 - var finish = finish || string.length - // atob that shit - var binary = atob(string) - var buffer = new Uint8Array(binary.length) - for (var i = start; i < finish; i++) { - buffer[i] = binary.charCodeAt(i) - } - return buffer - } - - /** - * Mapping from EXIF orientation values to data - * regarding the rotation and mirroring necessary to - * render the canvas correctly - * Derived from: - * http://www.daveperrett.com/articles/2012/07/28/exif-orientation-handling-is-a-ghetto/ - */ - var orientationToTransform = { - 1: { rotation: 0, mirror: false }, - 2: { rotation: 0, mirror: true }, - 3: { rotation: 180, mirror: false }, - 4: { rotation: 180, mirror: true }, - 5: { rotation: 90, mirror: true }, - 6: { rotation: 90, mirror: false }, - 7: { rotation: 270, mirror: true }, - 8: { rotation: 270, mirror: false } - } - - - return upload -})()
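
The deleted upload.task_progress above is a small poll-with-backoff loop against the task status endpoint: PENDING is re-checked every 2 s, PROCESSING every 500 ms, and SUCCESS or any unrecognised state stops the loop. Roughly the same logic restated without jQuery (fetch-based; the onProgress/onDone callbacks are placeholders, not names from the repo):

    // Poll a task status URL until the task leaves the PENDING/PROCESSING states.
    function pollTask(statusUrl, onProgress, onDone) {
      fetch(statusUrl)
        .then(function (res) { return res.json() })
        .then(function (data) {
          var delay
          switch (data.state) {
            case 'PENDING':
              delay = 2000               // still queued: check back less often
              break
            case 'PROCESSING':
              onProgress(data.message, data.percent)
              delay = 500                // actively working: check back quickly
              break
            case 'SUCCESS':
              onDone(data)
              return                     // done: stop polling
            default:
              return                     // anything else is treated as terminal
          }
          setTimeout(function () { pollTask(statusUrl, onProgress, onDone) }, delay)
        })
    }
    // e.g. pollTask(data.task_url, app.updateProgress, onDone)
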
\ No newline at end of file
diff --git a/old/server/app/static/js/util.js b/old/server/app/static/js/util.js
deleted file mode 100644
index 851f634a..00000000
--- a/old/server/app/static/js/util.js
+++ /dev/null
@@ -1,32 +0,0 @@
-var is_iphone = (navigator.userAgent.match(/iPhone/i)) || (navigator.userAgent.match(/iPod/i))
-var is_ipad = (navigator.userAgent.match(/iPad/i))
-var is_android = (navigator.userAgent.match(/Android/i))
-var is_mobile = is_iphone || is_ipad || is_android
-var is_desktop = ! is_mobile;
-
-document.body.parentNode.classList.add(is_desktop ? 'desktop' : 'mobile')
-
-function preventDefault(e){
- e.preventDefault()
- e.stopPropagation()
-}
-
-var decodeEntities = (function() {
- // this prevents any overhead from creating the object each time
- var element = document.createElement('div');
-
- function decodeHTMLEntities (str) {
- if(str && typeof str === 'string') {
- // strip script/html tags
- str = str.replace(/<script[^>]*>([\S\s]*?)<\/script>/gmi, '');
- str = str.replace(/<\/?\w(?:[^"'>]|"[^"]*"|'[^']*')*>/gmi, '');
- element.innerHTML = str;
- str = element.textContent;
- element.textContent = '';
- }
-
- return str;
- }
-
- return decodeHTMLEntities;
-})();
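
decodeEntities above exists so that JSON embedded in the page as HTML-escaped text (the #dropdown_options element read by the deleted app.js above) can be unescaped before JSON.parse. A small usage sketch; the escaped payload here is made up for illustration:

    // Hypothetical HTML-escaped JSON, similar in shape to what #dropdown_options held.
    var escaped = '{&quot;example&quot;: {&quot;name&quot;: &quot;example&quot;, &quot;title&quot;: &quot;Example style&quot;, &quot;selected&quot;: true}}'
    var items = JSON.parse(decodeEntities(escaped))
    // items.example.title === 'Example style'
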
\ No newline at end of file diff --git a/old/server/app/static/js/vendor/ExifReader.js b/old/server/app/static/js/vendor/ExifReader.js deleted file mode 100644 index a8343ede..00000000 --- a/old/server/app/static/js/vendor/ExifReader.js +++ /dev/null @@ -1,1363 +0,0 @@ -// Generated by CoffeeScript 1.6.2 -/* -# ExifReader 1.1.1 -# http://github.com/mattiasw/exifreader -# Copyright (C) 2011-2014 Mattias Wallander <mattias@wallander.eu> -# Licensed under the GNU Lesser General Public License version 3 or later -# See license text at http://www.gnu.org/licenses/lgpl.txt -*/ - - -(function() { - (typeof exports !== "undefined" && exports !== null ? exports : this).ExifReader = (function() { - ExifReader.prototype._MIN_DATA_BUFFER_LENGTH = 2; - - ExifReader.prototype._JPEG_ID_SIZE = 2; - - ExifReader.prototype._JPEG_ID = 0xffd8; - - ExifReader.prototype._APP_MARKER_SIZE = 2; - - ExifReader.prototype._APP0_MARKER = 0xffe0; - - ExifReader.prototype._APP1_MARKER = 0xffe1; - - ExifReader.prototype._APP15_MARKER = 0xffef; - - ExifReader.prototype._APP_ID_OFFSET = 4; - - ExifReader.prototype._BYTES_Exif = 0x45786966; - - ExifReader.prototype._TIFF_HEADER_OFFSET = 10; - - ExifReader.prototype._BYTE_ORDER_BIG_ENDIAN = 0x4949; - - ExifReader.prototype._BYTE_ORDER_LITTLE_ENDIAN = 0x4d4d; - - function ExifReader() { - var _this = this; - - this._getTagValueAt = { - 1: function(offset) { - return _this._getByteAt(offset); - }, - 2: function(offset) { - return _this._getAsciiAt(offset); - }, - 3: function(offset) { - return _this._getShortAt(offset); - }, - 4: function(offset) { - return _this._getLongAt(offset); - }, - 5: function(offset) { - return _this._getRationalAt(offset); - }, - 7: function(offset) { - return _this._getUndefinedAt(offset); - }, - 9: function(offset) { - return _this._getSlongAt(offset); - }, - 10: function(offset) { - return _this._getSrationalAt(offset); - } - }; - this._tiffHeaderOffset = 0; - } - - /* - # Loads all the Exif tags from the specified image file buffer. - # - # data ArrayBuffer Image file data - */ - - - ExifReader.prototype.load = function(data) { - return this.loadView(new DataView(data)); - }; - - /* - # Loads all the Exif tags from the specified image file buffer view. Probably - # used when DataView isn't supported by the browser. 
- # - # @_dataView DataView Image file data view - */ - - - ExifReader.prototype.loadView = function(_dataView) { - this._dataView = _dataView; - this._tags = {}; - this._checkImageHeader(); - this._readTags(); - return this._dataView = null; - }; - - ExifReader.prototype._checkImageHeader = function() { - if (this._dataView.byteLength < this._MIN_DATA_BUFFER_LENGTH || this._dataView.getUint16(0, false) !== this._JPEG_ID) { - throw new Error('Invalid image format'); - } - this._parseAppMarkers(this._dataView); - if (!this._hasExifData()) { - throw new Error('No Exif data'); - } - }; - - ExifReader.prototype._parseAppMarkers = function(dataView) { - var appMarkerPosition, fieldLength, _results; - - appMarkerPosition = this._JPEG_ID_SIZE; - _results = []; - while (true) { - if (dataView.byteLength < appMarkerPosition + this._APP_ID_OFFSET + 5) { - break; - } - if (this._isApp1ExifMarker(dataView, appMarkerPosition)) { - fieldLength = dataView.getUint16(appMarkerPosition + this._APP_MARKER_SIZE, false); - this._tiffHeaderOffset = appMarkerPosition + this._TIFF_HEADER_OFFSET; - } else if (this._isAppMarker(dataView, appMarkerPosition)) { - fieldLength = dataView.getUint16(appMarkerPosition + this._APP_MARKER_SIZE, false); - } else { - break; - } - _results.push(appMarkerPosition += this._APP_MARKER_SIZE + fieldLength); - } - return _results; - }; - - ExifReader.prototype._isApp1ExifMarker = function(dataView, appMarkerPosition) { - return dataView.getUint16(appMarkerPosition, false) === this._APP1_MARKER && dataView.getUint32(appMarkerPosition + this._APP_ID_OFFSET, false) === this._BYTES_Exif && dataView.getUint8(appMarkerPosition + this._APP_ID_OFFSET + 4, false) === 0x00; - }; - - ExifReader.prototype._isAppMarker = function(dataView, appMarkerPosition) { - var appMarker; - - appMarker = dataView.getUint16(appMarkerPosition, false); - return appMarker >= this._APP0_MARKER && appMarker <= this._APP15_MARKER; - }; - - ExifReader.prototype._hasExifData = function() { - return this._tiffHeaderOffset !== 0; - }; - - ExifReader.prototype._readTags = function() { - this._setByteOrder(); - this._read0thIfd(); - this._readExifIfd(); - this._readGpsIfd(); - return this._readInteroperabilityIfd(); - }; - - ExifReader.prototype._setByteOrder = function() { - if (this._dataView.getUint16(this._tiffHeaderOffset) === this._BYTE_ORDER_BIG_ENDIAN) { - return this._littleEndian = true; - } else if (this._dataView.getUint16(this._tiffHeaderOffset) === this._BYTE_ORDER_LITTLE_ENDIAN) { - return this._littleEndian = false; - } else { - throw new Error('Illegal byte order value. 
Faulty image.'); - } - }; - - ExifReader.prototype._read0thIfd = function() { - var ifdOffset; - - ifdOffset = this._getIfdOffset(); - return this._readIfd('0th', ifdOffset); - }; - - ExifReader.prototype._getIfdOffset = function() { - return this._tiffHeaderOffset + this._getLongAt(this._tiffHeaderOffset + 4); - }; - - ExifReader.prototype._readExifIfd = function() { - var ifdOffset; - - if (this._tags['Exif IFD Pointer'] != null) { - ifdOffset = this._tiffHeaderOffset + this._tags['Exif IFD Pointer'].value; - return this._readIfd('exif', ifdOffset); - } - }; - - ExifReader.prototype._readGpsIfd = function() { - var ifdOffset; - - if (this._tags['GPS Info IFD Pointer'] != null) { - ifdOffset = this._tiffHeaderOffset + this._tags['GPS Info IFD Pointer'].value; - return this._readIfd('gps', ifdOffset); - } - }; - - ExifReader.prototype._readInteroperabilityIfd = function() { - var ifdOffset; - - if (this._tags['Interoperability IFD Pointer'] != null) { - ifdOffset = this._tiffHeaderOffset + this._tags['Interoperability IFD Pointer'].value; - return this._readIfd('interoperability', ifdOffset); - } - }; - - ExifReader.prototype._readIfd = function(ifdType, offset) { - var fieldIndex, numberOfFields, tag, _i, _results; - - numberOfFields = this._getShortAt(offset); - offset += 2; - _results = []; - for (fieldIndex = _i = 0; 0 <= numberOfFields ? _i < numberOfFields : _i > numberOfFields; fieldIndex = 0 <= numberOfFields ? ++_i : --_i) { - tag = this._readTag(ifdType, offset); - if (tag !== void 0) { - this._tags[tag.name] = { - 'value': tag.value, - 'description': tag.description - }; - } - _results.push(offset += 12); - } - return _results; - }; - - ExifReader.prototype._readTag = function(ifdType, offset) { - var tagCode, tagCount, tagDescription, tagName, tagType, tagValue, tagValueOffset; - - tagCode = this._getShortAt(offset); - tagType = this._getShortAt(offset + 2); - tagCount = this._getLongAt(offset + 4); - if (this._typeSizes[tagType] === void 0) { - return void 0; - } - if (this._typeSizes[tagType] * tagCount <= 4) { - tagValue = this._getTagValue(offset + 8, tagType, tagCount); - } else { - tagValueOffset = this._getLongAt(offset + 8); - tagValue = this._getTagValue(this._tiffHeaderOffset + tagValueOffset, tagType, tagCount); - } - if (tagType === this._tagTypes['ASCII']) { - tagValue = this._splitNullSeparatedAsciiString(tagValue); - } - if (this._tagNames[ifdType][tagCode] != null) { - if ((this._tagNames[ifdType][tagCode]['name'] != null) && (this._tagNames[ifdType][tagCode]['description'] != null)) { - tagName = this._tagNames[ifdType][tagCode]['name']; - tagDescription = this._tagNames[ifdType][tagCode]['description'](tagValue); - } else { - tagName = this._tagNames[ifdType][tagCode]; - if (tagValue instanceof Array) { - tagDescription = tagValue.join(', '); - } else { - tagDescription = tagValue; - } - } - return { - 'name': tagName, - 'value': tagValue, - 'description': tagDescription - }; - } else { - return { - 'name': "undefined-" + tagCode, - 'value': tagValue, - 'description': tagValue - }; - } - }; - - ExifReader.prototype._getTagValue = function(offset, type, count) { - var tagValue, value, valueIndex; - - value = (function() { - var _i, _results; - - _results = []; - for (valueIndex = _i = 0; 0 <= count ? _i < count : _i > count; valueIndex = 0 <= count ? 
++_i : --_i) { - tagValue = this._getTagValueAt[type](offset); - offset += this._typeSizes[type]; - _results.push(tagValue); - } - return _results; - }).call(this); - if (value.length === 1) { - value = value[0]; - } else if (type === this._tagTypes['ASCII']) { - value = this._getAsciiValue(value); - } - return value; - }; - - ExifReader.prototype._getAsciiValue = function(charArray) { - var charCode, newCharArray; - - return newCharArray = (function() { - var _i, _len, _results; - - _results = []; - for (_i = 0, _len = charArray.length; _i < _len; _i++) { - charCode = charArray[_i]; - _results.push(String.fromCharCode(charCode)); - } - return _results; - })(); - }; - - ExifReader.prototype._getByteAt = function(offset) { - return this._dataView.getUint8(offset); - }; - - ExifReader.prototype._getAsciiAt = function(offset) { - return this._dataView.getUint8(offset); - }; - - ExifReader.prototype._getShortAt = function(offset) { - return this._dataView.getUint16(offset, this._littleEndian); - }; - - ExifReader.prototype._getLongAt = function(offset) { - return this._dataView.getUint32(offset, this._littleEndian); - }; - - ExifReader.prototype._getRationalAt = function(offset) { - return this._getLongAt(offset) / this._getLongAt(offset + 4); - }; - - ExifReader.prototype._getUndefinedAt = function(offset) { - return this._getByteAt(offset); - }; - - ExifReader.prototype._getSlongAt = function(offset) { - return this._dataView.getInt32(offset, this._littleEndian); - }; - - ExifReader.prototype._getSrationalAt = function(offset) { - return this._getSlongAt(offset) / this._getSlongAt(offset + 4); - }; - - ExifReader.prototype._splitNullSeparatedAsciiString = function(string) { - var character, i, tagValue, _i, _len; - - tagValue = []; - i = 0; - for (_i = 0, _len = string.length; _i < _len; _i++) { - character = string[_i]; - if (character === '\x00') { - i++; - continue; - } - if (tagValue[i] == null) { - tagValue[i] = ''; - } - tagValue[i] += character; - } - return tagValue; - }; - - ExifReader.prototype._typeSizes = { - 1: 1, - 2: 1, - 3: 2, - 4: 4, - 5: 8, - 7: 1, - 9: 4, - 10: 8 - }; - - ExifReader.prototype._tagTypes = { - 'BYTE': 1, - 'ASCII': 2, - 'SHORT': 3, - 'LONG': 4, - 'RATIONAL': 5, - 'UNDEFINED': 7, - 'SLONG': 9, - 'SRATIONAL': 10 - }; - - ExifReader.prototype._tagNames = { - '0th': { - 0x0100: 'ImageWidth', - 0x0101: 'ImageLength', - 0x0102: 'BitsPerSample', - 0x0103: 'Compression', - 0x0106: 'PhotometricInterpretation', - 0x010e: 'ImageDescription', - 0x010f: 'Make', - 0x0110: 'Model', - 0x0111: 'StripOffsets', - 0x0112: { - 'name': 'Orientation', - 'description': function(value) { - switch (value) { - case 1: - return 'top-left'; - case 2: - return 'top-right'; - case 3: - return 'bottom-right'; - case 4: - return 'bottom-left'; - case 5: - return 'left-top'; - case 6: - return 'right-top'; - case 7: - return 'right-bottom'; - case 8: - return 'left-bottom'; - default: - return 'Undefined'; - } - } - }, - 0x0115: 'SamplesPerPixel', - 0x0116: 'RowsPerStrip', - 0x0117: 'StripByteCounts', - 0x011a: 'XResolution', - 0x011b: 'YResolution', - 0x011c: 'PlanarConfiguration', - 0x0128: { - 'name': 'ResolutionUnit', - 'description': function(value) { - switch (value) { - case 2: - return 'inches'; - case 3: - return 'centimeters'; - default: - return 'Unknown'; - } - } - }, - 0x012d: 'TransferFunction', - 0x0131: 'Software', - 0x0132: 'DateTime', - 0x013b: 'Artist', - 0x013e: 'WhitePoint', - 0x013f: 'PrimaryChromaticities', - 0x0201: 'JPEGInterchangeFormat', - 0x0202: 
'JPEGInterchangeFormatLength', - 0x0211: 'YCbCrCoefficients', - 0x0212: 'YCbCrSubSampling', - 0x0213: { - 'name': 'YCbCrPositioning', - 'description': function(value) { - switch (value) { - case 1: - return 'centered'; - case 2: - return 'co-sited'; - default: - return 'undefied ' + value; - } - } - }, - 0x0214: 'ReferenceBlackWhite', - 0x8298: { - 'name': 'Copyright', - 'description': function(value) { - return value.join('; '); - } - }, - 0x8769: 'Exif IFD Pointer', - 0x8825: 'GPS Info IFD Pointer' - }, - 'exif': { - 0x829a: 'ExposureTime', - 0x829d: 'FNumber', - 0x8822: { - 'name': 'ExposureProgram', - 'description': function(value) { - switch (value) { - case 0: - return 'Undefined'; - case 1: - return 'Manual'; - case 2: - return 'Normal program'; - case 3: - return 'Aperture priority'; - case 4: - return 'Shutter priority'; - case 5: - return 'Creative program'; - case 6: - return 'Action program'; - case 7: - return 'Portrait mode'; - case 8: - return 'Landscape mode'; - default: - return 'Unknown'; - } - } - }, - 0x8824: 'SpectralSensitivity', - 0x8827: 'ISOSpeedRatings', - 0x8828: { - 'name': 'OECF', - 'description': function(value) { - return '[Raw OECF table data]'; - } - }, - 0x9000: { - 'name': 'ExifVersion', - 'description': function(value) { - var charCode, string, _i, _len; - - string = ''; - for (_i = 0, _len = value.length; _i < _len; _i++) { - charCode = value[_i]; - string += String.fromCharCode(charCode); - } - return string; - } - }, - 0x9003: 'DateTimeOriginal', - 0x9004: 'DateTimeDigitized', - 0x9101: { - 'name': 'ComponentsConfiguration', - 'description': function(value) { - var character, string, _i, _len; - - string = ''; - for (_i = 0, _len = value.length; _i < _len; _i++) { - character = value[_i]; - switch (character) { - case 0x31: - string += 'Y'; - break; - case 0x32: - string += 'Cb'; - break; - case 0x33: - string += 'Cr'; - break; - case 0x34: - string += 'R'; - break; - case 0x35: - string += 'G'; - break; - case 0x36: - string += 'B'; - } - } - return string; - } - }, - 0x9102: 'CompressedBitsPerPixel', - 0x9201: 'ShutterSpeedValue', - 0x9202: 'ApertureValue', - 0x9203: 'BrightnessValue', - 0x9204: 'ExposureBiasValue', - 0x9205: 'MaxApertureValue', - 0x9206: 'SubjectDistance', - 0x9207: { - 'name': 'MeteringMode', - 'description': function(value) { - switch (value) { - case 1: - return 'Average'; - case 2: - return 'CenterWeightedAverage'; - case 3: - return 'Spot'; - case 4: - return 'MultiSpot'; - case 5: - return 'Pattern'; - case 6: - return 'Partial'; - case 255: - return 'Other'; - default: - return 'Unknown'; - } - } - }, - 0x9208: { - 'name': 'LightSource', - 'description': function(value) { - switch (value) { - case 1: - return 'Daylight'; - case 2: - return 'Fluorescent'; - case 3: - return 'Tungsten (incandescent light)'; - case 4: - return 'Flash'; - case 9: - return 'Fine weather'; - case 10: - return 'Cloudy weather'; - case 11: - return 'Shade'; - case 12: - return 'Daylight fluorescent (D 5700 – 7100K)'; - case 13: - return 'Day white fluorescent (N 4600 – 5400K)'; - case 14: - return 'Cool white fluorescent (W 3900 – 4500K)'; - case 15: - return 'White fluorescent (WW 3200 – 3700K)'; - case 17: - return 'Standard light A'; - case 18: - return 'Standard light B'; - case 19: - return 'Standard light C'; - case 20: - return 'D55'; - case 21: - return 'D65'; - case 22: - return 'D75'; - case 23: - return 'D50'; - case 24: - return 'ISO studio tungsten'; - case 255: - return 'Other light source'; - default: - return 'Unknown'; - } - } - }, 
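// Editorial note (not part of the original diff): the _tagNames tables above mix two
// entry shapes — a plain string (just the tag's name, with the raw value reused as the
// description) and an object carrying a 'name' plus a 'description' callback that turns
// the raw value into readable text. A minimal standalone sketch of how the deleted
// ExifReader's _readTag resolves either shape; resolveTagEntry and the sample entry
// below are illustrative only and do not appear in the original file.
function resolveTagEntry(entry, value) {
  if (entry && entry.name != null && entry.description != null) {
    // Object form: delegate formatting to the tag-specific description callback.
    return { name: entry.name, description: entry.description(value) };
  }
  // String form: the entry is the name; array values are joined, scalars pass through.
  return {
    name: entry,
    description: value instanceof Array ? value.join(', ') : value
  };
}

// Example: a ResolutionUnit-style entry maps the raw SHORT value 2 to 'inches'.
var sampleEntry = {
  name: 'ResolutionUnit',
  description: function (v) { return v === 2 ? 'inches' : v === 3 ? 'centimeters' : 'Unknown'; }
};
console.log(resolveTagEntry(sampleEntry, 2));
// -> { name: 'ResolutionUnit', description: 'inches' }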
- 0x9209: { - 'name': 'Flash', - 'description': function(value) { - switch (value) { - case 0x00: - return 'Flash did not fire'; - case 0x01: - return 'Flash fired'; - case 0x05: - return 'Strobe return light not detected'; - case 0x07: - return 'Strobe return light detected'; - case 0x09: - return 'Flash fired, compulsory flash mode'; - case 0x0d: - return 'Flash fired, compulsory flash mode, return light not detected'; - case 0x0f: - return 'Flash fired, compulsory flash mode, return light detected'; - case 0x10: - return 'Flash did not fire, compulsory flash mode'; - case 0x18: - return 'Flash did not fire, auto mode'; - case 0x19: - return 'Flash fired, auto mode'; - case 0x1d: - return 'Flash fired, auto mode, return light not detected'; - case 0x1f: - return 'Flash fired, auto mode, return light detected'; - case 0x20: - return 'No flash function'; - case 0x41: - return 'Flash fired, red-eye reduction mode'; - case 0x45: - return 'Flash fired, red-eye reduction mode, return light not detected'; - case 0x47: - return 'Flash fired, red-eye reduction mode, return light detected'; - case 0x49: - return 'Flash fired, compulsory flash mode, red-eye reduction mode'; - case 0x4d: - return 'Flash fired, compulsory flash mode, red-eye reduction mode, return light not detected'; - case 0x4f: - return 'Flash fired, compulsory flash mode, red-eye reduction mode, return light detected'; - case 0x59: - return 'Flash fired, auto mode, red-eye reduction mode'; - case 0x5d: - return 'Flash fired, auto mode, return light not detected, red-eye reduction mode'; - case 0x5f: - return 'Flash fired, auto mode, return light detected, red-eye reduction mode'; - default: - return 'Unknown'; - } - } - }, - 0x920a: 'FocalLength', - 0x9214: { - 'name': 'SubjectArea', - 'description': function(value) { - switch (value.length) { - case 2: - return "Location; X: " + value[0] + ", Y: " + value[1]; - case 3: - return "Circle; X: " + value[0] + ", Y: " + value[1] + ", diameter: " + value[2]; - case 4: - return "Rectangle; X: " + value[0] + ", Y: " + value[1] + ", width: " + value[2] + ", height: " + value[3]; - default: - return 'Unknown'; - } - } - }, - 0x927c: { - 'name': 'MakerNote', - 'description': function(value) { - return '[Raw maker note data]'; - } - }, - 0x9286: { - 'name': 'UserComment', - 'description': function(value) { - switch (value.slice(0, 8).map(function(charCode) { - return String.fromCharCode(charCode); - }).join('')) { - case 'ASCII\x00\x00\x00': - return value.slice(8, value.length).map(function(charCode) { - return String.fromCharCode(charCode); - }).join(''); - case 'JIS\x00\x00\x00\x00\x00': - return '[JIS encoded text]'; - case 'UNICODE\x00': - return '[Unicode encoded text]'; - case '\x00\x00\x00\x00\x00\x00\x00\x00': - return '[Undefined encoding]'; - } - } - }, - 0x9290: 'SubSecTime', - 0x9291: 'SubSecTimeOriginal', - 0x9292: 'SubSecTimeDigitized', - 0xa000: { - 'name': 'FlashpixVersion', - 'description': function(value) { - var charCode, string, _i, _len; - - string = ''; - for (_i = 0, _len = value.length; _i < _len; _i++) { - charCode = value[_i]; - string += String.fromCharCode(charCode); - } - return string; - } - }, - 0xa001: { - 'name': 'ColorSpace', - 'description': function(value) { - switch (value) { - case 1: - return 'sRGB'; - case 0xffff: - return 'Uncalibrated'; - default: - return 'Unknown'; - } - } - }, - 0xa002: 'PixelXDimension', - 0xa003: 'PixelYDimension', - 0xa004: 'RelatedSoundFile', - 0xa005: 'Interoperability IFD Pointer', - 0xa20b: 'FlashEnergy', - 0xa20c: { - 
'name': 'SpatialFrequencyResponse', - 'description': function(value) { - return '[Raw SFR table data]'; - } - }, - 0xa20e: 'FocalPlaneXResolution', - 0xa20f: 'FocalPlaneYResolution', - 0xa210: { - 'name': 'FocalPlaneResolutionUnit', - 'description': function(value) { - switch (value) { - case 2: - return 'inches'; - case 3: - return 'centimeters'; - default: - return 'Unknown'; - } - } - }, - 0xa214: { - 'name': 'SubjectLocation', - 'description': function(value) { - return "X: " + value[0] + ", Y: " + value[1]; - } - }, - 0xa215: 'ExposureIndex', - 0xa217: { - 'name': 'SensingMethod', - 'description': function(value) { - switch (value) { - case 1: - return 'Undefined'; - case 2: - return 'One-chip color area sensor'; - case 3: - return 'Two-chip color area sensor'; - case 4: - return 'Three-chip color area sensor'; - case 5: - return 'Color sequential area sensor'; - case 7: - return 'Trilinear sensor'; - case 8: - return 'Color sequential linear sensor'; - default: - return 'Unknown'; - } - } - }, - 0xa300: { - 'name': 'FileSource', - 'description': function(value) { - switch (value) { - case 3: - return 'DSC'; - default: - return 'Unknown'; - } - } - }, - 0xa301: { - 'name': 'SceneType', - 'description': function(value) { - switch (value) { - case 1: - return 'A directly photographed image'; - default: - return 'Unknown'; - } - } - }, - 0xa302: { - 'name': 'CFAPattern', - 'description': function(value) { - return '[Raw CFA pattern table data]'; - } - }, - 0xa401: { - 'name': 'CustomRendered', - 'description': function(value) { - switch (value) { - case 0: - return 'Normal process'; - case 1: - return 'Custom process'; - default: - return 'Unknown'; - } - } - }, - 0xa402: { - 'name': 'ExposureMode', - 'description': function(value) { - switch (value) { - case 0: - return 'Auto exposure'; - case 1: - return 'Manual exposure'; - case 2: - return 'Auto bracket'; - default: - return 'Unknown'; - } - } - }, - 0xa403: { - 'name': 'WhiteBalance', - 'description': function(value) { - switch (value) { - case 0: - return 'Auto white balance'; - case 1: - return 'Manual white balance'; - default: - return 'Unknown'; - } - } - }, - 0xa404: { - 'name': 'DigitalZoomRatio', - 'description': function(value) { - switch (value) { - case 0: - return 'Digital zoom was not used'; - default: - return value; - } - } - }, - 0xa405: { - 'name': 'FocalLengthIn35mmFilm', - 'description': function(value) { - switch (value) { - case 0: - return 'Unknown'; - default: - return value; - } - } - }, - 0xa406: { - 'name': 'SceneCaptureType', - 'description': function(value) { - switch (value) { - case 0: - return 'Standard'; - case 1: - return 'Landscape'; - case 2: - return 'Portrait'; - case 3: - return 'Night scene'; - default: - return 'Unknown'; - } - } - }, - 0xa407: { - 'name': 'GainControl', - 'description': function(value) { - switch (value) { - case 0: - return 'None'; - case 1: - return 'Low gain up'; - case 2: - return 'High gain up'; - case 3: - return 'Low gain down'; - case 4: - return 'High gain down'; - default: - return 'Unknown'; - } - } - }, - 0xa408: { - 'name': 'Contrast', - 'description': function(value) { - switch (value) { - case 0: - return 'Normal'; - case 1: - return 'Soft'; - case 2: - return 'Hard'; - default: - return 'Unknown'; - } - } - }, - 0xa409: { - 'name': 'Saturation', - 'description': function(value) { - switch (value) { - case 0: - return 'Normal'; - case 1: - return 'Low saturation'; - case 2: - return 'High saturation'; - default: - return 'Unknown'; - } - } - }, - 0xa40a: { - 
'name': 'Sharpness', - 'description': function(value) { - switch (value) { - case 0: - return 'Normal'; - case 1: - return 'Soft'; - case 2: - return 'Hard'; - default: - return 'Unknown'; - } - } - }, - 0xa40b: { - 'name': 'DeviceSettingDescription', - 'description': function(value) { - return '[Raw device settings table data]'; - } - }, - 0xa40c: { - 'name': 'SubjectDistanceRange', - 'description': function(value) { - switch (value) { - case 1: - return 'Macro'; - case 2: - return 'Close view'; - case 3: - return 'Distant view'; - default: - return 'Unknown'; - } - } - }, - 0xa420: 'ImageUniqueID' - }, - 'gps': { - 0x0000: { - 'name': 'GPSVersionID', - 'description': function(value) { - var _ref, _ref1; - - if ((value[0] === (_ref = value[1]) && _ref === 2) && (value[2] === (_ref1 = value[3]) && _ref1 === 0)) { - return 'Version 2.2'; - } else { - return 'Unknown'; - } - } - }, - 0x0001: { - 'name': 'GPSLatitudeRef', - 'description': function(value) { - switch (value.join('')) { - case 'N': - return 'North latitude'; - case 'S': - return 'South latitude'; - default: - return 'Unknown'; - } - } - }, - 0x0002: { - 'name': 'GPSLatitude', - 'description': function(value) { - return value[0] + value[1] / 60 + value[2] / 3600; - } - }, - 0x0003: { - 'name': 'GPSLongitudeRef', - 'description': function(value) { - switch (value.join('')) { - case 'E': - return 'East longitude'; - case 'W': - return 'West longitude'; - default: - return 'Unknown'; - } - } - }, - 0x0004: { - 'name': 'GPSLongitude', - 'description': function(value) { - return value[0] + value[1] / 60 + value[2] / 3600; - } - }, - 0x0005: { - 'name': 'GPSAltitudeRef', - 'description': function(value) { - switch (value) { - case 0: - return 'Sea level'; - case 1: - return 'Sea level reference (negative value)'; - default: - return 'Unknown'; - } - } - }, - 0x0006: { - 'name': 'GPSAltitude', - 'description': function(value) { - return value + ' m'; - } - }, - 0x0007: { - 'name': 'GPSTimeStamp', - 'description': function(value) { - var padZero; - - padZero = function(num) { - var i; - - return ((function() { - var _i, _ref, _results; - - _results = []; - for (i = _i = 0, _ref = 2 - ('' + Math.floor(num)).length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? 
++_i : --_i) { - _results.push('0'); - } - return _results; - })()) + num; - }; - return value.map(padZero).join(':'); - } - }, - 0x0008: 'GPSSatellites', - 0x0009: { - 'name': 'GPSStatus', - 'description': function(value) { - switch (value.join('')) { - case 'A': - return 'Measurement in progress'; - case 'V': - return 'Measurement Interoperability'; - default: - return 'Unknown'; - } - } - }, - 0x000a: { - 'name': 'GPSMeasureMode', - 'description': function(value) { - switch (value.join('')) { - case '2': - return '2-dimensional measurement'; - case '3': - return '3-dimensional measurement'; - default: - return 'Unknown'; - } - } - }, - 0x000b: 'GPSDOP', - 0x000c: { - 'name': 'GPSSpeedRef', - 'description': function(value) { - switch (value.join('')) { - case 'K': - return 'Kilometers per hour'; - case 'M': - return 'Miles per hour'; - case 'N': - return 'Knots'; - default: - return 'Unknown'; - } - } - }, - 0x000d: 'GPSSpeed', - 0x000e: { - 'name': 'GPSTrackRef', - 'description': function(value) { - switch (value.join('')) { - case 'T': - return 'True direction'; - case 'M': - return 'Magnetic direction'; - default: - return 'Unknown'; - } - } - }, - 0x000f: 'GPSTrack', - 0x0010: { - 'name': 'GPSImgDirectionRef', - 'description': function(value) { - switch (value.join('')) { - case 'T': - return 'True direction'; - case 'M': - return 'Magnetic direction'; - default: - return 'Unknown'; - } - } - }, - 0x0011: 'GPSImgDirection', - 0x0012: 'GPSMapDatum', - 0x0013: { - 'name': 'GPSDestLatitudeRef', - 'description': function(value) { - switch (value.join('')) { - case 'N': - return 'North latitude'; - case 'S': - return 'South latitude'; - default: - return 'Unknown'; - } - } - }, - 0x0014: { - 'name': 'GPSDestLatitude', - 'description': function(value) { - return value[0] + value[1] / 60 + value[2] / 3600; - } - }, - 0x0015: { - 'name': 'GPSDestLongitudeRef', - 'description': function(value) { - switch (value.join('')) { - case 'E': - return 'East longitude'; - case 'W': - return 'West longitude'; - default: - return 'Unknown'; - } - } - }, - 0x0016: { - 'name': 'GPSDestLongitude', - 'description': function(value) { - return value[0] + value[1] / 60 + value[2] / 3600; - } - }, - 0x0017: { - 'name': 'GPSDestBearingRef', - 'description': function(value) { - switch (value.join('')) { - case 'T': - return 'True direction'; - case 'M': - return 'Magnetic direction'; - default: - return 'Unknown'; - } - } - }, - 0x0018: 'GPSDestBearing', - 0x0019: { - 'name': 'GPSDestDistanceRef', - 'description': function(value) { - switch (value.join('')) { - case 'K': - return 'Kilometers'; - case 'M': - return 'Miles'; - case 'N': - return 'Knots'; - default: - return 'Unknown'; - } - } - }, - 0x001a: 'GPSDestDistance', - 0x001b: { - 'name': 'GPSProcessingMethod', - 'description': function(value) { - if (value === 0) { - return 'Undefined'; - } else { - switch (value.slice(0, 8).map(function(charCode) { - return String.fromCharCode(charCode); - }).join('')) { - case 'ASCII\x00\x00\x00': - return value.slice(8, value.length).map(function(charCode) { - return String.fromCharCode(charCode); - }).join(''); - case 'JIS\x00\x00\x00\x00\x00': - return '[JIS encoded text]'; - case 'UNICODE\x00': - return '[Unicode encoded text]'; - case '\x00\x00\x00\x00\x00\x00\x00\x00': - return '[Undefined encoding]'; - } - } - } - }, - 0x001c: { - 'name': 'GPSAreaInformation', - 'description': function(value) { - if (value === 0) { - return 'Undefined'; - } else { - switch (value.slice(0, 8).map(function(charCode) { - return 
String.fromCharCode(charCode); - }).join('')) { - case 'ASCII\x00\x00\x00': - return value.slice(8, value.length).map(function(charCode) { - return String.fromCharCode(charCode); - }).join(''); - case 'JIS\x00\x00\x00\x00\x00': - return '[JIS encoded text]'; - case 'UNICODE\x00': - return '[Unicode encoded text]'; - case '\x00\x00\x00\x00\x00\x00\x00\x00': - return '[Undefined encoding]'; - } - } - } - }, - 0x001d: 'GPSDateStamp', - 0x001e: { - 'name': 'GPSDifferential', - 'description': function(value) { - switch (value) { - case 0: - return 'Measurement without differential correction'; - case 1: - return 'Differential correction applied'; - default: - return 'Unknown'; - } - } - } - }, - 'interoperability': { - 0x0001: 'InteroperabilityIndex', - 0x0002: 'UnknownInteroperabilityTag0x0002', - 0x1001: 'UnknownInteroperabilityTag0x1001', - 0x1002: 'UnknownInteroperabilityTag0x1002' - } - }; - - /* - # Gets the image's value of the tag with the given name. - # - # name string The name of the tag to get the value of - # - # Returns the value of the tag with the given name if it exists, - # otherwise throws "Undefined". - */ - - - ExifReader.prototype.getTagValue = function(name) { - if (this._tags[name] != null) { - return this._tags[name].value; - } else { - return void 0; - } - }; - - /* - # Gets the image's description of the tag with the given name. - # - # name string The name of the tag to get the description of - # - # Returns the description of the tag with the given name if it exists, - # otherwise throws "Undefined". - */ - - - ExifReader.prototype.getTagDescription = function(name) { - if (this._tags[name] != null) { - return this._tags[name].description; - } else { - return void 0; - } - }; - - /* - # Gets all the image's tags. - # - # Returns the image's tags as an associative array: name -> description. - */ - - - ExifReader.prototype.getAllTags = function() { - return this._tags; - }; - - /* - # Delete a tag. - # - # name string The name of the tag to delete - # - # Delete the tag with the given name. Can be used to lower memory usage. - # E.g., the MakerNote tag can be really large. 
- */ - - - ExifReader.prototype.deleteTag = function(name) { - return delete this._tags[name]; - }; - - return ExifReader; - - })(); - -}).call(this); diff --git a/old/server/app/static/js/vendor/canvas-to-blob.js b/old/server/app/static/js/vendor/canvas-to-blob.js deleted file mode 100644 index 32913667..00000000 --- a/old/server/app/static/js/vendor/canvas-to-blob.js +++ /dev/null @@ -1,111 +0,0 @@ -/* - * JavaScript Canvas to Blob - * https://github.com/blueimp/JavaScript-Canvas-to-Blob - * - * Copyright 2012, Sebastian Tschan - * https://blueimp.net - * - * Licensed under the MIT license: - * http://www.opensource.org/licenses/MIT - * - * Based on stackoverflow user Stoive's code snippet: - * http://stackoverflow.com/q/4998908 - */ - -/* global atob, Blob, define */ - -;(function (window) { - 'use strict' - - var CanvasPrototype = window.HTMLCanvasElement && - window.HTMLCanvasElement.prototype - var hasBlobConstructor = window.Blob && (function () { - try { - return Boolean(new Blob()) - } catch (e) { - return false - } - }()) - var hasArrayBufferViewSupport = hasBlobConstructor && window.Uint8Array && - (function () { - try { - return new Blob([new Uint8Array(100)]).size === 100 - } catch (e) { - return false - } - }()) - var BlobBuilder = window.BlobBuilder || window.WebKitBlobBuilder || - window.MozBlobBuilder || window.MSBlobBuilder - var dataURIPattern = /^data:((.*?)(;charset=.*?)?)(;base64)?,/ - var dataURLtoBlob = (hasBlobConstructor || BlobBuilder) && window.atob && - window.ArrayBuffer && window.Uint8Array && - function (dataURI) { - var matches, - mediaType, - isBase64, - dataString, - byteString, - arrayBuffer, - intArray, - i, - bb - // Parse the dataURI components as per RFC 2397 - matches = dataURI.match(dataURIPattern) - if (!matches) { - throw new Error('invalid data URI') - } - // Default to text/plain;charset=US-ASCII - mediaType = matches[2] - ? matches[1] - : 'text/plain' + (matches[3] || ';charset=US-ASCII') - isBase64 = !!matches[4] - dataString = dataURI.slice(matches[0].length) - if (isBase64) { - // Convert base64 to raw binary data held in a string: - byteString = atob(dataString) - } else { - // Convert base64/URLEncoded data component to raw binary: - byteString = decodeURIComponent(dataString) - } - // Write the bytes of the string to an ArrayBuffer: - arrayBuffer = new ArrayBuffer(byteString.length) - intArray = new Uint8Array(arrayBuffer) - for (i = 0; i < byteString.length; i += 1) { - intArray[i] = byteString.charCodeAt(i) - } - // Write the ArrayBuffer (or ArrayBufferView) to a blob: - if (hasBlobConstructor) { - return new Blob( - [hasArrayBufferViewSupport ? 
intArray : arrayBuffer], - {type: mediaType} - ) - } - bb = new BlobBuilder() - bb.append(arrayBuffer) - return bb.getBlob(mediaType) - } - if (window.HTMLCanvasElement && !CanvasPrototype.toBlob) { - if (CanvasPrototype.mozGetAsFile) { - CanvasPrototype.toBlob = function (callback, type, quality) { - if (quality && CanvasPrototype.toDataURL && dataURLtoBlob) { - callback(dataURLtoBlob(this.toDataURL(type, quality))) - } else { - callback(this.mozGetAsFile('blob', type)) - } - } - } else if (CanvasPrototype.toDataURL && dataURLtoBlob) { - CanvasPrototype.toBlob = function (callback, type, quality) { - callback(dataURLtoBlob(this.toDataURL(type, quality))) - } - } - } - if (typeof define === 'function' && define.amd) { - define(function () { - return dataURLtoBlob - }) - } else if (typeof module === 'object' && module.exports) { - module.exports = dataURLtoBlob - } else { - window.dataURLtoBlob = dataURLtoBlob - } -}(window)) diff --git a/old/server/app/static/js/vendor/jquery-3.3.1.min.js b/old/server/app/static/js/vendor/jquery-3.3.1.min.js deleted file mode 100644 index 4d9b3a25..00000000 --- a/old/server/app/static/js/vendor/jquery-3.3.1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(e,t){"use strict";var n=[],r=e.document,i=Object.getPrototypeOf,o=n.slice,a=n.concat,s=n.push,u=n.indexOf,l={},c=l.toString,f=l.hasOwnProperty,p=f.toString,d=p.call(Object),h={},g=function e(t){return"function"==typeof t&&"number"!=typeof t.nodeType},y=function e(t){return null!=t&&t===t.window},v={type:!0,src:!0,noModule:!0};function m(e,t,n){var i,o=(t=t||r).createElement("script");if(o.text=e,n)for(i in v)n[i]&&(o[i]=n[i]);t.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[c.call(e)]||"object":typeof e}var b="3.3.1",w=function(e,t){return new w.fn.init(e,t)},T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;w.fn=w.prototype={jquery:"3.3.1",constructor:w,length:0,toArray:function(){return o.call(this)},get:function(e){return null==e?o.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=w.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return w.each(this,e)},map:function(e){return this.pushStack(w.map(this,function(t,n){return e.call(t,n,t)}))},slice:function(){return this.pushStack(o.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(n>=0&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:s,sort:n.sort,splice:n.splice},w.extend=w.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||g(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)n=a[t],a!==(r=e[t])&&(l&&r&&(w.isPlainObject(r)||(i=Array.isArray(r)))?(i?(i=!1,o=n&&Array.isArray(n)?n:[]):o=n&&w.isPlainObject(n)?n:{},a[t]=w.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},w.extend({expando:"jQuery"+("3.3.1"+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var 
t,n;return!(!e||"[object Object]"!==c.call(e))&&(!(t=i(e))||"function"==typeof(n=f.call(t,"constructor")&&t.constructor)&&p.call(n)===d)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e){m(e)},each:function(e,t){var n,r=0;if(C(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(C(Object(e))?w.merge(n,"string"==typeof e?[e]:e):s.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:u.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r,i=[],o=0,a=e.length,s=!n;o<a;o++)(r=!t(e[o],o))!==s&&i.push(e[o]);return i},map:function(e,t,n){var r,i,o=0,s=[];if(C(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&s.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&s.push(i);return a.apply([],s)},guid:1,support:h}),"function"==typeof Symbol&&(w.fn[Symbol.iterator]=n[Symbol.iterator]),w.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function C(e){var t=!!e&&"length"in e&&e.length,n=x(e);return!g(e)&&!y(e)&&("array"===n||0===t||"number"==typeof t&&t>0&&t-1 in e)}var E=function(e){var t,n,r,i,o,a,s,u,l,c,f,p,d,h,g,y,v,m,x,b="sizzle"+1*new Date,w=e.document,T=0,C=0,E=ae(),k=ae(),S=ae(),D=function(e,t){return e===t&&(f=!0),0},N={}.hasOwnProperty,A=[],j=A.pop,q=A.push,L=A.push,H=A.slice,O=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},P="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",R="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",I="\\["+M+"*("+R+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+R+"))|)"+M+"*\\]",W=":("+R+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+I+")*)|.*)\\)|)",$=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),F=new RegExp("^"+M+"*,"+M+"*"),_=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),z=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),X=new RegExp(W),U=new RegExp("^"+R+"$"),V={ID:new RegExp("^#("+R+")"),CLASS:new RegExp("^\\.("+R+")"),TAG:new RegExp("^("+R+"|[*])"),ATTR:new RegExp("^"+I),PSEUDO:new RegExp("^"+W),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+P+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},G=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Q=/^[^{]+\{\s*\[native \w/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,K=/[+~]/,Z=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ee=function(e,t,n){var r="0x"+t-65536;return r!==r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},te=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ne=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},re=function(){p()},ie=me(function(e){return!0===e.disabled&&("form"in e||"label"in e)},{dir:"parentNode",next:"legend"});try{L.apply(A=H.call(w.childNodes),w.childNodes),A[w.childNodes.length].nodeType}catch(e){L={apply:A.length?function(e,t){q.apply(e,H.call(t))}:function(e,t){var 
n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function oe(e,t,r,i){var o,s,l,c,f,h,v,m=t&&t.ownerDocument,T=t?t.nodeType:9;if(r=r||[],"string"!=typeof e||!e||1!==T&&9!==T&&11!==T)return r;if(!i&&((t?t.ownerDocument||t:w)!==d&&p(t),t=t||d,g)){if(11!==T&&(f=J.exec(e)))if(o=f[1]){if(9===T){if(!(l=t.getElementById(o)))return r;if(l.id===o)return r.push(l),r}else if(m&&(l=m.getElementById(o))&&x(t,l)&&l.id===o)return r.push(l),r}else{if(f[2])return L.apply(r,t.getElementsByTagName(e)),r;if((o=f[3])&&n.getElementsByClassName&&t.getElementsByClassName)return L.apply(r,t.getElementsByClassName(o)),r}if(n.qsa&&!S[e+" "]&&(!y||!y.test(e))){if(1!==T)m=t,v=e;else if("object"!==t.nodeName.toLowerCase()){(c=t.getAttribute("id"))?c=c.replace(te,ne):t.setAttribute("id",c=b),s=(h=a(e)).length;while(s--)h[s]="#"+c+" "+ve(h[s]);v=h.join(","),m=K.test(e)&&ge(t.parentNode)||t}if(v)try{return L.apply(r,m.querySelectorAll(v)),r}catch(e){}finally{c===b&&t.removeAttribute("id")}}}return u(e.replace(B,"$1"),t,r,i)}function ae(){var e=[];function t(n,i){return e.push(n+" ")>r.cacheLength&&delete t[e.shift()],t[n+" "]=i}return t}function se(e){return e[b]=!0,e}function ue(e){var t=d.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function le(e,t){var n=e.split("|"),i=n.length;while(i--)r.attrHandle[n[i]]=t}function ce(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function fe(e){return function(t){return"input"===t.nodeName.toLowerCase()&&t.type===e}}function pe(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function de(e){return function(t){return"form"in t?t.parentNode&&!1===t.disabled?"label"in t?"label"in t.parentNode?t.parentNode.disabled===e:t.disabled===e:t.isDisabled===e||t.isDisabled!==!e&&ie(t)===e:t.disabled===e:"label"in t&&t.disabled===e}}function he(e){return se(function(t){return t=+t,se(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}function ge(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}n=oe.support={},o=oe.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return!!t&&"HTML"!==t.nodeName},p=oe.setDocument=function(e){var t,i,a=e?e.ownerDocument||e:w;return a!==d&&9===a.nodeType&&a.documentElement?(d=a,h=d.documentElement,g=!o(d),w!==d&&(i=d.defaultView)&&i.top!==i&&(i.addEventListener?i.addEventListener("unload",re,!1):i.attachEvent&&i.attachEvent("onunload",re)),n.attributes=ue(function(e){return e.className="i",!e.getAttribute("className")}),n.getElementsByTagName=ue(function(e){return e.appendChild(d.createComment("")),!e.getElementsByTagName("*").length}),n.getElementsByClassName=Q.test(d.getElementsByClassName),n.getById=ue(function(e){return h.appendChild(e).id=b,!d.getElementsByName||!d.getElementsByName(b).length}),n.getById?(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){return e.getAttribute("id")===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n=t.getElementById(e);return n?[n]:[]}}):(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){var n="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return n&&n.value===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var 
n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),r.find.TAG=n.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):n.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},r.find.CLASS=n.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&g)return t.getElementsByClassName(e)},v=[],y=[],(n.qsa=Q.test(d.querySelectorAll))&&(ue(function(e){h.appendChild(e).innerHTML="<a id='"+b+"'></a><select id='"+b+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&y.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||y.push("\\["+M+"*(?:value|"+P+")"),e.querySelectorAll("[id~="+b+"-]").length||y.push("~="),e.querySelectorAll(":checked").length||y.push(":checked"),e.querySelectorAll("a#"+b+"+*").length||y.push(".#.+[+~]")}),ue(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var t=d.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&y.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&y.push(":enabled",":disabled"),h.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&y.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),y.push(",.*:")})),(n.matchesSelector=Q.test(m=h.matches||h.webkitMatchesSelector||h.mozMatchesSelector||h.oMatchesSelector||h.msMatchesSelector))&&ue(function(e){n.disconnectedMatch=m.call(e,"*"),m.call(e,"[s!='']:x"),v.push("!=",W)}),y=y.length&&new RegExp(y.join("|")),v=v.length&&new RegExp(v.join("|")),t=Q.test(h.compareDocumentPosition),x=t||Q.test(h.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return f=!0,0;var r=!e.compareDocumentPosition-!t.compareDocumentPosition;return r||(1&(r=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!n.sortDetached&&t.compareDocumentPosition(e)===r?e===d||e.ownerDocument===w&&x(w,e)?-1:t===d||t.ownerDocument===w&&x(w,t)?1:c?O(c,e)-O(c,t):0:4&r?-1:1)}:function(e,t){if(e===t)return f=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===d?-1:t===d?1:i?-1:o?1:c?O(c,e)-O(c,t):0;if(i===o)return ce(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?ce(a[r],s[r]):a[r]===w?-1:s[r]===w?1:0},d):d},oe.matches=function(e,t){return oe(e,null,null,t)},oe.matchesSelector=function(e,t){if((e.ownerDocument||e)!==d&&p(e),t=t.replace(z,"='$1']"),n.matchesSelector&&g&&!S[t+" "]&&(!v||!v.test(t))&&(!y||!y.test(t)))try{var r=m.call(e,t);if(r||n.disconnectedMatch||e.document&&11!==e.document.nodeType)return r}catch(e){}return oe(t,d,null,[e]).length>0},oe.contains=function(e,t){return(e.ownerDocument||e)!==d&&p(e),x(e,t)},oe.attr=function(e,t){(e.ownerDocument||e)!==d&&p(e);var i=r.attrHandle[t.toLowerCase()],o=i&&N.call(r.attrHandle,t.toLowerCase())?i(e,t,!g):void 0;return void 
0!==o?o:n.attributes||!g?e.getAttribute(t):(o=e.getAttributeNode(t))&&o.specified?o.value:null},oe.escape=function(e){return(e+"").replace(te,ne)},oe.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},oe.uniqueSort=function(e){var t,r=[],i=0,o=0;if(f=!n.detectDuplicates,c=!n.sortStable&&e.slice(0),e.sort(D),f){while(t=e[o++])t===e[o]&&(i=r.push(o));while(i--)e.splice(r[i],1)}return c=null,e},i=oe.getText=function(e){var t,n="",r=0,o=e.nodeType;if(o){if(1===o||9===o||11===o){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=i(e)}else if(3===o||4===o)return e.nodeValue}else while(t=e[r++])n+=i(t);return n},(r=oe.selectors={cacheLength:50,createPseudo:se,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(Z,ee),e[3]=(e[3]||e[4]||e[5]||"").replace(Z,ee),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||oe.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&oe.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return V.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=a(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(Z,ee).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=E[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&E(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=oe.attr(r,e);return null==i?"!="===t:!t||(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i.replace($," ")+" ").indexOf(n)>-1:"|="===t&&(i===n||i.slice(0,n.length+1)===n+"-"))}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,f,p,d,h,g=o!==a?"nextSibling":"previousSibling",y=t.parentNode,v=s&&t.nodeName.toLowerCase(),m=!u&&!s,x=!1;if(y){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===v:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?y.firstChild:y.lastChild],a&&m){x=(d=(l=(c=(f=(p=y)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1])&&l[2],p=d&&y.childNodes[d];while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if(1===p.nodeType&&++x&&p===t){c[e]=[T,d,x];break}}else if(m&&(x=d=(l=(c=(f=(p=t)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1]),!1===x)while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===v:1===p.nodeType)&&++x&&(m&&((c=(f=p[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]=[T,x]),p===t))break;return(x-=i)===r||x%r==0&&x/r>=0}}},PSEUDO:function(e,t){var n,i=r.pseudos[e]||r.setFilters[e.toLowerCase()]||oe.error("unsupported pseudo: "+e);return i[b]?i(t):i.length>1?(n=[e,e,"",t],r.setFilters.hasOwnProperty(e.toLowerCase())?se(function(e,n){var r,o=i(e,t),a=o.length;while(a--)e[r=O(e,o[a])]=!(n[r]=o[a])}):function(e){return i(e,0,n)}):i}},pseudos:{not:se(function(e){var t=[],n=[],r=s(e.replace(B,"$1"));return 
r[b]?se(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),t[0]=null,!n.pop()}}),has:se(function(e){return function(t){return oe(e,t).length>0}}),contains:se(function(e){return e=e.replace(Z,ee),function(t){return(t.textContent||t.innerText||i(t)).indexOf(e)>-1}}),lang:se(function(e){return U.test(e||"")||oe.error("unsupported lang: "+e),e=e.replace(Z,ee).toLowerCase(),function(t){var n;do{if(n=g?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return(n=n.toLowerCase())===e||0===n.indexOf(e+"-")}while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===h},focus:function(e){return e===d.activeElement&&(!d.hasFocus||d.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:de(!1),disabled:de(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!r.pseudos.empty(e)},header:function(e){return Y.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:he(function(){return[0]}),last:he(function(e,t){return[t-1]}),eq:he(function(e,t,n){return[n<0?n+t:n]}),even:he(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:he(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:he(function(e,t,n){for(var r=n<0?n+t:n;--r>=0;)e.push(r);return e}),gt:he(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=r.pseudos.eq;for(t in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})r.pseudos[t]=fe(t);for(t in{submit:!0,reset:!0})r.pseudos[t]=pe(t);function ye(){}ye.prototype=r.filters=r.pseudos,r.setFilters=new ye,a=oe.tokenize=function(e,t){var n,i,o,a,s,u,l,c=k[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=r.preFilter;while(s){n&&!(i=F.exec(s))||(i&&(s=s.slice(i[0].length)||s),u.push(o=[])),n=!1,(i=_.exec(s))&&(n=i.shift(),o.push({value:n,type:i[0].replace(B," ")}),s=s.slice(n.length));for(a in r.filter)!(i=V[a].exec(s))||l[a]&&!(i=l[a](i))||(n=i.shift(),o.push({value:n,type:a,matches:i}),s=s.slice(n.length));if(!n)break}return t?s.length:s?oe.error(e):k(e,u).slice(0)};function ve(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function me(e,t,n){var r=t.dir,i=t.next,o=i||r,a=n&&"parentNode"===o,s=C++;return t.first?function(t,n,i){while(t=t[r])if(1===t.nodeType||a)return e(t,n,i);return!1}:function(t,n,u){var l,c,f,p=[T,s];if(u){while(t=t[r])if((1===t.nodeType||a)&&e(t,n,u))return!0}else while(t=t[r])if(1===t.nodeType||a)if(f=t[b]||(t[b]={}),c=f[t.uniqueID]||(f[t.uniqueID]={}),i&&i===t.nodeName.toLowerCase())t=t[r]||t;else{if((l=c[o])&&l[0]===T&&l[1]===s)return p[2]=l[2];if(c[o]=p,p[2]=e(t,n,u))return!0}return!1}}function xe(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function be(e,t,n){for(var r=0,i=t.length;r<i;r++)oe(e,t[r],n);return n}function we(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Te(e,t,n,r,i,o){return 
r&&!r[b]&&(r=Te(r)),i&&!i[b]&&(i=Te(i,o)),se(function(o,a,s,u){var l,c,f,p=[],d=[],h=a.length,g=o||be(t||"*",s.nodeType?[s]:s,[]),y=!e||!o&&t?g:we(g,p,e,s,u),v=n?i||(o?e:h||r)?[]:a:y;if(n&&n(y,v,s,u),r){l=we(v,d),r(l,[],s,u),c=l.length;while(c--)(f=l[c])&&(v[d[c]]=!(y[d[c]]=f))}if(o){if(i||e){if(i){l=[],c=v.length;while(c--)(f=v[c])&&l.push(y[c]=f);i(null,v=[],l,u)}c=v.length;while(c--)(f=v[c])&&(l=i?O(o,f):p[c])>-1&&(o[l]=!(a[l]=f))}}else v=we(v===a?v.splice(h,v.length):v),i?i(null,a,v,u):L.apply(a,v)})}function Ce(e){for(var t,n,i,o=e.length,a=r.relative[e[0].type],s=a||r.relative[" "],u=a?1:0,c=me(function(e){return e===t},s,!0),f=me(function(e){return O(t,e)>-1},s,!0),p=[function(e,n,r){var i=!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):f(e,n,r));return t=null,i}];u<o;u++)if(n=r.relative[e[u].type])p=[me(xe(p),n)];else{if((n=r.filter[e[u].type].apply(null,e[u].matches))[b]){for(i=++u;i<o;i++)if(r.relative[e[i].type])break;return Te(u>1&&xe(p),u>1&&ve(e.slice(0,u-1).concat({value:" "===e[u-2].type?"*":""})).replace(B,"$1"),n,u<i&&Ce(e.slice(u,i)),i<o&&Ce(e=e.slice(i)),i<o&&ve(e))}p.push(n)}return xe(p)}function Ee(e,t){var n=t.length>0,i=e.length>0,o=function(o,a,s,u,c){var f,h,y,v=0,m="0",x=o&&[],b=[],w=l,C=o||i&&r.find.TAG("*",c),E=T+=null==w?1:Math.random()||.1,k=C.length;for(c&&(l=a===d||a||c);m!==k&&null!=(f=C[m]);m++){if(i&&f){h=0,a||f.ownerDocument===d||(p(f),s=!g);while(y=e[h++])if(y(f,a||d,s)){u.push(f);break}c&&(T=E)}n&&((f=!y&&f)&&v--,o&&x.push(f))}if(v+=m,n&&m!==v){h=0;while(y=t[h++])y(x,b,a,s);if(o){if(v>0)while(m--)x[m]||b[m]||(b[m]=j.call(u));b=we(b)}L.apply(u,b),c&&!o&&b.length>0&&v+t.length>1&&oe.uniqueSort(u)}return c&&(T=E,l=w),x};return n?se(o):o}return s=oe.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=a(e)),n=t.length;while(n--)(o=Ce(t[n]))[b]?r.push(o):i.push(o);(o=S(e,Ee(i,r))).selector=e}return o},u=oe.select=function(e,t,n,i){var o,u,l,c,f,p="function"==typeof e&&e,d=!i&&a(e=p.selector||e);if(n=n||[],1===d.length){if((u=d[0]=d[0].slice(0)).length>2&&"ID"===(l=u[0]).type&&9===t.nodeType&&g&&r.relative[u[1].type]){if(!(t=(r.find.ID(l.matches[0].replace(Z,ee),t)||[])[0]))return n;p&&(t=t.parentNode),e=e.slice(u.shift().value.length)}o=V.needsContext.test(e)?0:u.length;while(o--){if(l=u[o],r.relative[c=l.type])break;if((f=r.find[c])&&(i=f(l.matches[0].replace(Z,ee),K.test(u[0].type)&&ge(t.parentNode)||t))){if(u.splice(o,1),!(e=i.length&&ve(u)))return L.apply(n,i),n;break}}}return(p||s(e,d))(i,t,!g,n,!t||K.test(e)&&ge(t.parentNode)||t),n},n.sortStable=b.split("").sort(D).join("")===b,n.detectDuplicates=!!f,p(),n.sortDetached=ue(function(e){return 1&e.compareDocumentPosition(d.createElement("fieldset"))}),ue(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||le("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),n.attributes&&ue(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||le("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ue(function(e){return null==e.getAttribute("disabled")})||le(P,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),oe}(e);w.find=E,w.expr=E.selectors,w.expr[":"]=w.expr.pseudos,w.uniqueSort=w.unique=E.uniqueSort,w.text=E.getText,w.isXMLDoc=E.isXML,w.contains=E.contains,w.escapeSelector=E.escape;var k=function(e,t,n){var r=[],i=void 
0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&w(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},D=w.expr.match.needsContext;function N(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var A=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,t,n){return g(t)?w.grep(e,function(e,r){return!!t.call(e,r,e)!==n}):t.nodeType?w.grep(e,function(e){return e===t!==n}):"string"!=typeof t?w.grep(e,function(e){return u.call(t,e)>-1!==n}):w.filter(t,e,n)}w.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?w.find.matchesSelector(r,e)?[r]:[]:w.find.matches(e,w.grep(t,function(e){return 1===e.nodeType}))},w.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(w(e).filter(function(){for(t=0;t<r;t++)if(w.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)w.find(e,i[t],n);return r>1?w.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&D.test(e)?w(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(w.fn.init=function(e,t,n){var i,o;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(i="<"===e[0]&&">"===e[e.length-1]&&e.length>=3?[null,e,null]:L.exec(e))||!i[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(i[1]){if(t=t instanceof w?t[0]:t,w.merge(this,w.parseHTML(i[1],t&&t.nodeType?t.ownerDocument||t:r,!0)),A.test(i[1])&&w.isPlainObject(t))for(i in t)g(this[i])?this[i](t[i]):this.attr(i,t[i]);return this}return(o=r.getElementById(i[2]))&&(this[0]=o,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):g(e)?void 0!==n.ready?n.ready(e):e(w):w.makeArray(e,this)}).prototype=w.fn,q=w(r);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};w.fn.extend({has:function(e){var t=w(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(w.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&w(e);if(!D.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?a.index(n)>-1:1===n.nodeType&&w.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(o.length>1?w.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?u.call(w(e),this[0]):u.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(w.uniqueSort(w.merge(this.get(),w(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}});function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}w.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return k(e,"parentNode")},parentsUntil:function(e,t,n){return k(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return k(e,"nextSibling")},prevAll:function(e){return k(e,"previousSibling")},nextUntil:function(e,t,n){return k(e,"nextSibling",n)},prevUntil:function(e,t,n){return k(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return 
N(e,"iframe")?e.contentDocument:(N(e,"template")&&(e=e.content||e),w.merge([],e.childNodes))}},function(e,t){w.fn[e]=function(n,r){var i=w.map(this,t,n);return"Until"!==e.slice(-5)&&(r=n),r&&"string"==typeof r&&(i=w.filter(r,i)),this.length>1&&(O[e]||w.uniqueSort(i),H.test(e)&&i.reverse()),this.pushStack(i)}});var M=/[^\x20\t\r\n\f]+/g;function R(e){var t={};return w.each(e.match(M)||[],function(e,n){t[n]=!0}),t}w.Callbacks=function(e){e="string"==typeof e?R(e):w.extend({},e);var t,n,r,i,o=[],a=[],s=-1,u=function(){for(i=i||e.once,r=t=!0;a.length;s=-1){n=a.shift();while(++s<o.length)!1===o[s].apply(n[0],n[1])&&e.stopOnFalse&&(s=o.length,n=!1)}e.memory||(n=!1),t=!1,i&&(o=n?[]:"")},l={add:function(){return o&&(n&&!t&&(s=o.length-1,a.push(n)),function t(n){w.each(n,function(n,r){g(r)?e.unique&&l.has(r)||o.push(r):r&&r.length&&"string"!==x(r)&&t(r)})}(arguments),n&&!t&&u()),this},remove:function(){return w.each(arguments,function(e,t){var n;while((n=w.inArray(t,o,n))>-1)o.splice(n,1),n<=s&&s--}),this},has:function(e){return e?w.inArray(e,o)>-1:o.length>0},empty:function(){return o&&(o=[]),this},disable:function(){return i=a=[],o=n="",this},disabled:function(){return!o},lock:function(){return i=a=[],n||t||(o=n=""),this},locked:function(){return!!i},fireWith:function(e,n){return i||(n=[e,(n=n||[]).slice?n.slice():n],a.push(n),t||u()),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!r}};return l};function I(e){return e}function W(e){throw e}function $(e,t,n,r){var i;try{e&&g(i=e.promise)?i.call(e).done(t).fail(n):e&&g(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}w.extend({Deferred:function(t){var n=[["notify","progress",w.Callbacks("memory"),w.Callbacks("memory"),2],["resolve","done",w.Callbacks("once memory"),w.Callbacks("once memory"),0,"resolved"],["reject","fail",w.Callbacks("once memory"),w.Callbacks("once memory"),1,"rejected"]],r="pending",i={state:function(){return r},always:function(){return o.done(arguments).fail(arguments),this},"catch":function(e){return i.then(null,e)},pipe:function(){var e=arguments;return w.Deferred(function(t){w.each(n,function(n,r){var i=g(e[r[4]])&&e[r[4]];o[r[1]](function(){var e=i&&i.apply(this,arguments);e&&g(e.promise)?e.promise().progress(t.notify).done(t.resolve).fail(t.reject):t[r[0]+"With"](this,i?[e]:arguments)})}),e=null}).promise()},then:function(t,r,i){var o=0;function a(t,n,r,i){return function(){var s=this,u=arguments,l=function(){var e,l;if(!(t<o)){if((e=r.apply(s,u))===n.promise())throw new TypeError("Thenable self-resolution");l=e&&("object"==typeof e||"function"==typeof e)&&e.then,g(l)?i?l.call(e,a(o,n,I,i),a(o,n,W,i)):(o++,l.call(e,a(o,n,I,i),a(o,n,W,i),a(o,n,I,n.notifyWith))):(r!==I&&(s=void 0,u=[e]),(i||n.resolveWith)(s,u))}},c=i?l:function(){try{l()}catch(e){w.Deferred.exceptionHook&&w.Deferred.exceptionHook(e,c.stackTrace),t+1>=o&&(r!==W&&(s=void 0,u=[e]),n.rejectWith(s,u))}};t?c():(w.Deferred.getStackHook&&(c.stackTrace=w.Deferred.getStackHook()),e.setTimeout(c))}}return w.Deferred(function(e){n[0][3].add(a(0,e,g(i)?i:I,e.notifyWith)),n[1][3].add(a(0,e,g(t)?t:I)),n[2][3].add(a(0,e,g(r)?r:W))}).promise()},promise:function(e){return null!=e?w.extend(e,i):i}},o={};return w.each(n,function(e,t){var a=t[2],s=t[5];i[t[1]]=a.add,s&&a.add(function(){r=s},n[3-e][2].disable,n[3-e][3].disable,n[0][2].lock,n[0][3].lock),a.add(t[3].fire),o[t[0]]=function(){return o[t[0]+"With"](this===o?void 
0:this,arguments),this},o[t[0]+"With"]=a.fireWith}),i.promise(o),t&&t.call(o,o),o},when:function(e){var t=arguments.length,n=t,r=Array(n),i=o.call(arguments),a=w.Deferred(),s=function(e){return function(n){r[e]=this,i[e]=arguments.length>1?o.call(arguments):n,--t||a.resolveWith(r,i)}};if(t<=1&&($(e,a.done(s(n)).resolve,a.reject,!t),"pending"===a.state()||g(i[n]&&i[n].then)))return a.then();while(n--)$(i[n],s(n),a.reject);return a.promise()}});var B=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;w.Deferred.exceptionHook=function(t,n){e.console&&e.console.warn&&t&&B.test(t.name)&&e.console.warn("jQuery.Deferred exception: "+t.message,t.stack,n)},w.readyException=function(t){e.setTimeout(function(){throw t})};var F=w.Deferred();w.fn.ready=function(e){return F.then(e)["catch"](function(e){w.readyException(e)}),this},w.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--w.readyWait:w.isReady)||(w.isReady=!0,!0!==e&&--w.readyWait>0||F.resolveWith(r,[w]))}}),w.ready.then=F.then;function _(){r.removeEventListener("DOMContentLoaded",_),e.removeEventListener("load",_),w.ready()}"complete"===r.readyState||"loading"!==r.readyState&&!r.documentElement.doScroll?e.setTimeout(w.ready):(r.addEventListener("DOMContentLoaded",_),e.addEventListener("load",_));var z=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===x(n)){i=!0;for(s in n)z(e,t,s,n[s],!0,o,a)}else if(void 0!==r&&(i=!0,g(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(w(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},X=/^-ms-/,U=/-([a-z])/g;function V(e,t){return t.toUpperCase()}function G(e){return e.replace(X,"ms-").replace(U,V)}var Y=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Q(){this.expando=w.expando+Q.uid++}Q.uid=1,Q.prototype={cache:function(e){var t=e[this.expando];return t||(t={},Y(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[G(t)]=n;else for(r in t)i[G(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][G(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 0!==t){n=(t=Array.isArray(t)?t.map(G):(t=G(t))in r?[t]:t.match(M)||[]).length;while(n--)delete r[t[n]]}(void 0===t||w.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!w.isEmptyObject(t)}};var J=new Q,K=new Q,Z=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,ee=/[A-Z]/g;function te(e){return"true"===e||"false"!==e&&("null"===e?null:e===+e+""?+e:Z.test(e)?JSON.parse(e):e)}function ne(e,t,n){var r;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(ee,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n=te(n)}catch(e){}K.set(e,t,n)}else n=void 0;return n}w.extend({hasData:function(e){return K.hasData(e)||J.hasData(e)},data:function(e,t,n){return K.access(e,t,n)},removeData:function(e,t){K.remove(e,t)},_data:function(e,t,n){return J.access(e,t,n)},_removeData:function(e,t){J.remove(e,t)}}),w.fn.extend({data:function(e,t){var n,r,i,o=this[0],a=o&&o.attributes;if(void 
0===e){if(this.length&&(i=K.get(o),1===o.nodeType&&!J.get(o,"hasDataAttrs"))){n=a.length;while(n--)a[n]&&0===(r=a[n].name).indexOf("data-")&&(r=G(r.slice(5)),ne(o,r,i[r]));J.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof e?this.each(function(){K.set(this,e)}):z(this,function(t){var n;if(o&&void 0===t){if(void 0!==(n=K.get(o,e)))return n;if(void 0!==(n=ne(o,e)))return n}else this.each(function(){K.set(this,e,t)})},null,t,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){K.remove(this,e)})}}),w.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=J.get(e,t),n&&(!r||Array.isArray(n)?r=J.access(e,t,w.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=w.queue(e,t),r=n.length,i=n.shift(),o=w._queueHooks(e,t),a=function(){w.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return J.get(e,n)||J.access(e,n,{empty:w.Callbacks("once memory").add(function(){J.remove(e,[t+"queue",n])})})}}),w.fn.extend({queue:function(e,t){var n=2;return"string"!=typeof e&&(t=e,e="fx",n--),arguments.length<n?w.queue(this[0],e):void 0===t?this:this.each(function(){var n=w.queue(this,e,t);w._queueHooks(this,e),"fx"===e&&"inprogress"!==n[0]&&w.dequeue(this,e)})},dequeue:function(e){return this.each(function(){w.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var n,r=1,i=w.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=J.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var re=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ie=new RegExp("^(?:([+-])=|)("+re+")([a-z%]*)$","i"),oe=["Top","Right","Bottom","Left"],ae=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&w.contains(e.ownerDocument,e)&&"none"===w.css(e,"display")},se=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i};function ue(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return w.css(e,t,"")},u=s(),l=n&&n[3]||(w.cssNumber[t]?"":"px"),c=(w.cssNumber[t]||"px"!==l&&+u)&&ie.exec(w.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)w.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,w.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var le={};function ce(e){var t,n=e.ownerDocument,r=e.nodeName,i=le[r];return i||(t=n.body.appendChild(n.createElement(r)),i=w.css(t,"display"),t.parentNode.removeChild(t),"none"===i&&(i="block"),le[r]=i,i)}function fe(e,t){for(var n,r,i=[],o=0,a=e.length;o<a;o++)(r=e[o]).style&&(n=r.style.display,t?("none"===n&&(i[o]=J.get(r,"display")||null,i[o]||(r.style.display="")),""===r.style.display&&ae(r)&&(i[o]=ce(r))):"none"!==n&&(i[o]="none",J.set(r,"display",n)));for(o=0;o<a;o++)null!=i[o]&&(e[o].style.display=i[o]);return e}w.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){ae(this)?w(this).show():w(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]+)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select 
multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;function ye(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&N(e,t)?w.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n<r;n++)J.set(e[n],"globalEval",!t||J.get(t[n],"globalEval"))}var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===x(o))w.merge(p,o.nodeType?[o]:o);else if(me.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+w.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;w.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&w.inArray(o,r)>-1)i&&i.push(o);else if(l=w.contains(o.ownerDocument,o),a=ye(f.appendChild(o),"script"),l&&ve(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}!function(){var e=r.createDocumentFragment().appendChild(r.createElement("div")),t=r.createElement("input");t.setAttribute("type","radio"),t.setAttribute("checked","checked"),t.setAttribute("name","t"),e.appendChild(t),h.checkClone=e.cloneNode(!0).cloneNode(!0).lastChild.checked,e.innerHTML="<textarea>x</textarea>",h.noCloneChecked=!!e.cloneNode(!0).lastChild.defaultValue}();var be=r.documentElement,we=/^key/,Te=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Ee(){return!0}function ke(){return!1}function Se(){try{return r.activeElement}catch(e){}}function De(e,t,n,r,i,o){var a,s;if("object"==typeof t){"string"!=typeof n&&(r=r||n,n=void 0);for(s in t)De(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=ke;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return w().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=w.guid++)),e.each(function(){w.event.add(this,t,i,r,n)})}w.event={global:{},add:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.get(e);if(y){n.handler&&(n=(o=n).handler,i=o.selector),i&&w.find.matchesSelector(be,i),n.guid||(n.guid=w.guid++),(u=y.events)||(u=y.events={}),(a=y.handle)||(a=y.handle=function(t){return"undefined"!=typeof w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),l=(t=(t||"").match(M)||[""]).length;while(l--)d=g=(s=Ce.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=w.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=w.event.special[d]||{},c=w.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&w.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(e,r,h,a)||e.addEventListener&&e.addEventListener(d,a)),f.add&&(f.add.call(e,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),w.event.global[d]=!0)}},remove:function(e,t,n,r,i){var 
o,a,s,u,l,c,f,p,d,h,g,y=J.hasData(e)&&J.get(e);if(y&&(u=y.events)){l=(t=(t||"").match(M)||[""]).length;while(l--)if(s=Ce.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){f=w.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,y.handle)||w.removeEvent(e,d,y.handle),delete u[d])}else for(d in u)w.event.remove(e,d+t[l],n,r,!0);w.isEmptyObject(u)&&J.remove(e,"handle events")}},dispatch:function(e){var t=w.event.fix(e),n,r,i,o,a,s,u=new Array(arguments.length),l=(J.get(this,"events")||{})[t.type]||[],c=w.event.special[t.type]||{};for(u[0]=t,n=1;n<arguments.length;n++)u[n]=arguments[n];if(t.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,t)){s=w.event.handlers.call(this,t,l),n=0;while((o=s[n++])&&!t.isPropagationStopped()){t.currentTarget=o.elem,r=0;while((a=o.handlers[r++])&&!t.isImmediatePropagationStopped())t.rnamespace&&!t.rnamespace.test(a.namespace)||(t.handleObj=a,t.data=a.data,void 0!==(i=((w.event.special[a.origType]||{}).handle||a.handler).apply(o.elem,u))&&!1===(t.result=i)&&(t.preventDefault(),t.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,t),t.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&e.button>=1))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?w(i,this).index(l)>-1:w.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(e,t){Object.defineProperty(w.Event.prototype,e,{enumerable:!0,configurable:!0,get:g(t)?function(){if(this.originalEvent)return t(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[e]},set:function(t){Object.defineProperty(this,e,{enumerable:!0,configurable:!0,writable:!0,value:t})}})},fix:function(e){return e[w.expando]?e:new w.Event(e)},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==Se()&&this.focus)return this.focus(),!1},delegateType:"focusin"},blur:{trigger:function(){if(this===Se()&&this.blur)return this.blur(),!1},delegateType:"focusout"},click:{trigger:function(){if("checkbox"===this.type&&this.click&&N(this,"input"))return this.click(),!1},_default:function(e){return N(e.target,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},w.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},w.Event=function(e,t){if(!(this instanceof w.Event))return new w.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?Ee:ke,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&w.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[w.expando]=!0},w.Event.prototype={constructor:w.Event,isDefaultPrevented:ke,isPropagationStopped:ke,isImmediatePropagationStopped:ke,isSimulated:!1,preventDefault:function(){var 
e=this.originalEvent;this.isDefaultPrevented=Ee,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=Ee,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var e=this.originalEvent;this.isImmediatePropagationStopped=Ee,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},w.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&we.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Te.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},w.event.addProp),w.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,t){w.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;return i&&(i===r||w.contains(r,i))||(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),w.fn.extend({on:function(e,t,n,r){return De(this,e,t,n,r)},one:function(e,t,n,r){return De(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,w(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=ke),this.each(function(){w.event.remove(this,e,n,t)})}});var Ne=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,Ae=/<script|<style|<link/i,je=/checked\s*(?:[^=]|=\s*.checked.)/i,qe=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Le(e,t){return N(e,"table")&&N(11!==t.nodeType?t:t.firstChild,"tr")?w(e).children("tbody")[0]||e:e}function He(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Oe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Pe(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(J.hasData(e)&&(o=J.access(e),a=J.set(t,o),l=o.events)){delete a.handle,a.events={};for(i in l)for(n=0,r=l[i].length;n<r;n++)w.event.add(t,i,l[i][n])}K.hasData(e)&&(s=K.access(e),u=w.extend({},s),K.set(t,u))}}function Me(e,t){var n=t.nodeName.toLowerCase();"input"===n&&pe.test(e.type)?t.checked=e.checked:"input"!==n&&"textarea"!==n||(t.defaultValue=e.defaultValue)}function Re(e,t,n,r){t=a.apply([],t);var i,o,s,u,l,c,f=0,p=e.length,d=p-1,y=t[0],v=g(y);if(v||p>1&&"string"==typeof y&&!h.checkClone&&je.test(y))return e.each(function(i){var o=e.eq(i);v&&(t[0]=y.call(this,i,o.html())),Re(o,t,n,r)});if(p&&(i=xe(t,e[0].ownerDocument,!1,e,r),o=i.firstChild,1===i.childNodes.length&&(i=o),o||r)){for(u=(s=w.map(ye(i,"script"),He)).length;f<p;f++)l=i,f!==d&&(l=w.clone(l,!0,!0),u&&w.merge(s,ye(l,"script"))),n.call(e[f],l,f);if(u)for(c=s[s.length-1].ownerDocument,w.map(s,Oe),f=0;f<u;f++)l=s[f],he.test(l.type||"")&&!J.access(l,"globalEval")&&w.contains(c,l)&&(l.src&&"module"!==(l.type||"").toLowerCase()?w._evalUrl&&w._evalUrl(l.src):m(l.textContent.replace(qe,""),c,l))}return e}function Ie(e,t,n){for(var 
r,i=t?w.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||w.cleanData(ye(r)),r.parentNode&&(n&&w.contains(r.ownerDocument,r)&&ve(ye(r,"script")),r.parentNode.removeChild(r));return e}w.extend({htmlPrefilter:function(e){return e.replace(Ne,"<$1></$2>")},clone:function(e,t,n){var r,i,o,a,s=e.cloneNode(!0),u=w.contains(e.ownerDocument,e);if(!(h.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||w.isXMLDoc(e)))for(a=ye(s),r=0,i=(o=ye(e)).length;r<i;r++)Me(o[r],a[r]);if(t)if(n)for(o=o||ye(e),a=a||ye(s),r=0,i=o.length;r<i;r++)Pe(o[r],a[r]);else Pe(e,s);return(a=ye(s,"script")).length>0&&ve(a,!u&&ye(e,"script")),s},cleanData:function(e){for(var t,n,r,i=w.event.special,o=0;void 0!==(n=e[o]);o++)if(Y(n)){if(t=n[J.expando]){if(t.events)for(r in t.events)i[r]?w.event.remove(n,r):w.removeEvent(n,r,t.handle);n[J.expando]=void 0}n[K.expando]&&(n[K.expando]=void 0)}}}),w.fn.extend({detach:function(e){return Ie(this,e,!0)},remove:function(e){return Ie(this,e)},text:function(e){return z(this,function(e){return void 0===e?w.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Re(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Le(this,e).appendChild(e)})},prepend:function(){return Re(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Le(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(w.cleanData(ye(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return w.clone(this,e,t)})},html:function(e){return z(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ae.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=w.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(w.cleanData(ye(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var e=[];return Re(this,arguments,function(t){var n=this.parentNode;w.inArray(this,e)<0&&(w.cleanData(ye(this)),n&&n.replaceChild(t,this))},e)}}),w.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){w.fn[e]=function(e){for(var n,r=[],i=w(e),o=i.length-1,a=0;a<=o;a++)n=a===o?this:this.clone(!0),w(i[a])[t](n),s.apply(r,n.get());return this.pushStack(r)}});var We=new RegExp("^("+re+")(?!px)[a-z%]+$","i"),$e=function(t){var n=t.ownerDocument.defaultView;return n&&n.opener||(n=e),n.getComputedStyle(t)},Be=new RegExp(oe.join("|"),"i");!function(){function t(){if(c){l.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",c.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",be.appendChild(l).appendChild(c);var t=e.getComputedStyle(c);i="1%"!==t.top,u=12===n(t.marginLeft),c.style.right="60%",s=36===n(t.right),o=36===n(t.width),c.style.position="absolute",a=36===c.offsetWidth||"absolute",be.removeChild(l),c=null}}function n(e){return Math.round(parseFloat(e))}var 
i,o,a,s,u,l=r.createElement("div"),c=r.createElement("div");c.style&&(c.style.backgroundClip="content-box",c.cloneNode(!0).style.backgroundClip="",h.clearCloneStyle="content-box"===c.style.backgroundClip,w.extend(h,{boxSizingReliable:function(){return t(),o},pixelBoxStyles:function(){return t(),s},pixelPosition:function(){return t(),i},reliableMarginLeft:function(){return t(),u},scrollboxSize:function(){return t(),a}}))}();function Fe(e,t,n){var r,i,o,a,s=e.style;return(n=n||$e(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||w.contains(e.ownerDocument,e)||(a=w.style(e,t)),!h.pixelBoxStyles()&&We.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function _e(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}var ze=/^(none|table(?!-c[ea]).+)/,Xe=/^--/,Ue={position:"absolute",visibility:"hidden",display:"block"},Ve={letterSpacing:"0",fontWeight:"400"},Ge=["Webkit","Moz","ms"],Ye=r.createElement("div").style;function Qe(e){if(e in Ye)return e;var t=e[0].toUpperCase()+e.slice(1),n=Ge.length;while(n--)if((e=Ge[n]+t)in Ye)return e}function Je(e){var t=w.cssProps[e];return t||(t=w.cssProps[e]=Qe(e)||e),t}function Ke(e,t,n){var r=ie.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function Ze(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=w.css(e,n+oe[a],!0,i)),r?("content"===n&&(u-=w.css(e,"padding"+oe[a],!0,i)),"margin"!==n&&(u-=w.css(e,"border"+oe[a]+"Width",!0,i))):(u+=w.css(e,"padding"+oe[a],!0,i),"padding"!==n?u+=w.css(e,"border"+oe[a]+"Width",!0,i):s+=w.css(e,"border"+oe[a]+"Width",!0,i));return!r&&o>=0&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))),u}function et(e,t,n){var r=$e(e),i=Fe(e,t,r),o="border-box"===w.css(e,"boxSizing",!1,r),a=o;if(We.test(i)){if(!n)return i;i="auto"}return a=a&&(h.boxSizingReliable()||i===e.style[t]),("auto"===i||!parseFloat(i)&&"inline"===w.css(e,"display",!1,r))&&(i=e["offset"+t[0].toUpperCase()+t.slice(1)],a=!0),(i=parseFloat(i)||0)+Ze(e,t,n||(o?"border":"content"),a,r,i)+"px"}w.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Fe(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=G(t),u=Xe.test(t),l=e.style;if(u||(t=Je(s)),a=w.cssHooks[t]||w.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"==(o=typeof n)&&(i=ie.exec(n))&&i[1]&&(n=ue(e,t,i),o="number"),null!=n&&n===n&&("number"===o&&(n+=i&&i[3]||(w.cssNumber[s]?"":"px")),h.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=G(t);return Xe.test(t)||(t=Je(s)),(a=w.cssHooks[t]||w.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Fe(e,t,r)),"normal"===i&&t in Ve&&(i=Ve[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),w.each(["height","width"],function(e,t){w.cssHooks[t]={get:function(e,n,r){if(n)return!ze.test(w.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?et(e,t,r):se(e,Ue,function(){return et(e,t,r)})},set:function(e,n,r){var 
i,o=$e(e),a="border-box"===w.css(e,"boxSizing",!1,o),s=r&&Ze(e,t,r,a,o);return a&&h.scrollboxSize()===o.position&&(s-=Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-parseFloat(o[t])-Ze(e,t,"border",!1,o)-.5)),s&&(i=ie.exec(n))&&"px"!==(i[3]||"px")&&(e.style[t]=n,n=w.css(e,t)),Ke(e,n,s)}}}),w.cssHooks.marginLeft=_e(h.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Fe(e,"marginLeft"))||e.getBoundingClientRect().left-se(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),w.each({margin:"",padding:"",border:"Width"},function(e,t){w.cssHooks[e+t]={expand:function(n){for(var r=0,i={},o="string"==typeof n?n.split(" "):[n];r<4;r++)i[e+oe[r]+t]=o[r]||o[r-2]||o[0];return i}},"margin"!==e&&(w.cssHooks[e+t].set=Ke)}),w.fn.extend({css:function(e,t){return z(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=$e(e),i=t.length;a<i;a++)o[t[a]]=w.css(e,t[a],!1,r);return o}return void 0!==n?w.style(e,t,n):w.css(e,t)},e,t,arguments.length>1)}});function tt(e,t,n,r,i){return new tt.prototype.init(e,t,n,r,i)}w.Tween=tt,tt.prototype={constructor:tt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||w.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(w.cssNumber[n]?"":"px")},cur:function(){var e=tt.propHooks[this.prop];return e&&e.get?e.get(this):tt.propHooks._default.get(this)},run:function(e){var t,n=tt.propHooks[this.prop];return this.options.duration?this.pos=t=w.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):tt.propHooks._default.set(this),this}},tt.prototype.init.prototype=tt.prototype,tt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=w.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){w.fx.step[e.prop]?w.fx.step[e.prop](e):1!==e.elem.nodeType||null==e.elem.style[w.cssProps[e.prop]]&&!w.cssHooks[e.prop]?e.elem[e.prop]=e.now:w.style(e.elem,e.prop,e.now+e.unit)}}},tt.propHooks.scrollTop=tt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},w.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},w.fx=tt.prototype.init,w.fx.step={};var nt,rt,it=/^(?:toggle|show|hide)$/,ot=/queueHooks$/;function at(){rt&&(!1===r.hidden&&e.requestAnimationFrame?e.requestAnimationFrame(at):e.setTimeout(at,w.fx.interval),w.fx.tick())}function st(){return e.setTimeout(function(){nt=void 0}),nt=Date.now()}function ut(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=oe[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function lt(e,t,n){for(var r,i=(pt.tweeners[t]||[]).concat(pt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function ct(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&ae(e),y=J.get(e,"fxshow");n.queue||(null==(a=w._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,w.queue(e,"fx").length||a.empty.fire()})}));for(r in t)if(i=t[r],it.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!y||void 
0===y[r])continue;g=!0}d[r]=y&&y[r]||w.style(e,r)}if((u=!w.isEmptyObject(t))||!w.isEmptyObject(d)){f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=y&&y.display)&&(l=J.get(e,"display")),"none"===(c=w.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=w.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===w.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1;for(r in d)u||(y?"hidden"in y&&(g=y.hidden):y=J.access(e,"fxshow",{display:l}),o&&(y.hidden=!g),g&&fe([e],!0),p.done(function(){g||fe([e]),J.remove(e,"fxshow");for(r in d)w.style(e,r,d[r])})),u=lt(g?y[r]:0,r,p),r in y||(y[r]=u.start,g&&(u.end=u.start,u.start=0))}}function ft(e,t){var n,r,i,o,a;for(n in e)if(r=G(n),i=t[r],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=w.cssHooks[r])&&"expand"in a){o=a.expand(o),delete e[r];for(n in o)n in e||(e[n]=o[n],t[n]=i)}else t[r]=i}function pt(e,t,n){var r,i,o=0,a=pt.prefilters.length,s=w.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;for(var t=nt||st(),n=Math.max(0,l.startTime+l.duration-t),r=1-(n/l.duration||0),o=0,a=l.tweens.length;o<a;o++)l.tweens[o].run(r);return s.notifyWith(e,[l,r,n]),r<1&&a?n:(a||s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:w.extend({},t),opts:w.extend(!0,{specialEasing:{},easing:w.easing._default},n),originalProperties:t,originalOptions:n,startTime:nt||st(),duration:n.duration,tweens:[],createTween:function(t,n){var r=w.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;n<r;n++)l.tweens[n].run(1);return t?(s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l,t])):s.rejectWith(e,[l,t]),this}}),c=l.props;for(ft(c,l.opts.specialEasing);o<a;o++)if(r=pt.prefilters[o].call(l,e,c,l.opts))return g(r.stop)&&(w._queueHooks(l.elem,l.opts.queue).stop=r.stop.bind(r)),r;return w.map(c,lt,l),g(l.opts.start)&&l.opts.start.call(e,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),w.fx.timer(w.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l}w.Animation=w.extend(pt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return ue(n.elem,e,ie.exec(t),n),n}]},tweener:function(e,t){g(e)?(t=e,e=["*"]):e=e.match(M);for(var n,r=0,i=e.length;r<i;r++)n=e[r],pt.tweeners[n]=pt.tweeners[n]||[],pt.tweeners[n].unshift(t)},prefilters:[ct],prefilter:function(e,t){t?pt.prefilters.unshift(e):pt.prefilters.push(e)}}),w.speed=function(e,t,n){var r=e&&"object"==typeof e?w.extend({},e):{complete:n||!n&&t||g(e)&&e,duration:e,easing:n&&t||t&&!g(t)&&t};return w.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in w.fx.speeds?r.duration=w.fx.speeds[r.duration]:r.duration=w.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){g(r.old)&&r.old.call(this),r.queue&&w.dequeue(this,r.queue)},r},w.fn.extend({fadeTo:function(e,t,n,r){return this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=w.isEmptyObject(e),o=w.speed(t,n,r),a=function(){var t=pt(this,w.extend({},e),o);(i||J.get(this,"finish"))&&t.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(e,t,n){var 
r=function(e){var t=e.stop;delete e.stop,t(n)};return"string"!=typeof e&&(n=t,t=e,e=void 0),t&&!1!==e&&this.queue(e||"fx",[]),this.each(function(){var t=!0,i=null!=e&&e+"queueHooks",o=w.timers,a=J.get(this);if(i)a[i]&&a[i].stop&&r(a[i]);else for(i in a)a[i]&&a[i].stop&&ot.test(i)&&r(a[i]);for(i=o.length;i--;)o[i].elem!==this||null!=e&&o[i].queue!==e||(o[i].anim.stop(n),t=!1,o.splice(i,1));!t&&n||w.dequeue(this,e)})},finish:function(e){return!1!==e&&(e=e||"fx"),this.each(function(){var t,n=J.get(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=w.timers,a=r?r.length:0;for(n.finish=!0,w.queue(this,e,[]),i&&i.stop&&i.stop.call(this,!0),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;t<a;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}}),w.each(["toggle","show","hide"],function(e,t){var n=w.fn[t];w.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ut(t,!0),e,r,i)}}),w.each({slideDown:ut("show"),slideUp:ut("hide"),slideToggle:ut("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){w.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),w.timers=[],w.fx.tick=function(){var e,t=0,n=w.timers;for(nt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||w.fx.stop(),nt=void 0},w.fx.timer=function(e){w.timers.push(e),w.fx.start()},w.fx.interval=13,w.fx.start=function(){rt||(rt=!0,at())},w.fx.stop=function(){rt=null},w.fx.speeds={slow:600,fast:200,_default:400},w.fn.delay=function(t,n){return t=w.fx?w.fx.speeds[t]||t:t,n=n||"fx",this.queue(n,function(n,r){var i=e.setTimeout(n,t);r.stop=function(){e.clearTimeout(i)}})},function(){var e=r.createElement("input"),t=r.createElement("select").appendChild(r.createElement("option"));e.type="checkbox",h.checkOn=""!==e.value,h.optSelected=t.selected,(e=r.createElement("input")).value="t",e.type="radio",h.radioValue="t"===e.value}();var dt,ht=w.expr.attrHandle;w.fn.extend({attr:function(e,t){return z(this,w.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){w.removeAttr(this,e)})}}),w.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?w.prop(e,t,n):(1===o&&w.isXMLDoc(e)||(i=w.attrHooks[t.toLowerCase()]||(w.expr.match.bool.test(t)?dt:void 0)),void 0!==n?null===n?void w.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=w.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!h.radioValue&&"radio"===t&&N(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(M);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),dt={set:function(e,t,n){return!1===t?w.removeAttr(e,n):e.setAttribute(n,n),n}},w.each(w.expr.match.bool.source.match(/\w+/g),function(e,t){var n=ht[t]||w.find.attr;ht[t]=function(e,t,r){var i,o,a=t.toLowerCase();return r||(o=ht[a],ht[a]=i,i=null!=n(e,t,r)?a:null,ht[a]=o),i}});var gt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;w.fn.extend({prop:function(e,t){return z(this,w.prop,e,t,arguments.length>1)},removeProp:function(e){return this.each(function(){delete this[w.propFix[e]||e]})}}),w.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&w.isXMLDoc(e)||(t=w.propFix[t]||t,i=w.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in 
i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=w.find.attr(e,"tabindex");return t?parseInt(t,10):gt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),h.optSelected||(w.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),w.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){w.propFix[this.toLowerCase()]=this});function vt(e){return(e.match(M)||[]).join(" ")}function mt(e){return e.getAttribute&&e.getAttribute("class")||""}function xt(e){return Array.isArray(e)?e:"string"==typeof e?e.match(M)||[]:[]}w.fn.extend({addClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).addClass(e.call(this,t,mt(this)))});if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).removeClass(e.call(this,t,mt(this)))});if(!arguments.length)return this.attr("class","");if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])while(r.indexOf(" "+o+" ")>-1)r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(e,t){var n=typeof e,r="string"===n||Array.isArray(e);return"boolean"==typeof t&&r?t?this.addClass(e):this.removeClass(e):g(e)?this.each(function(n){w(this).toggleClass(e.call(this,n,mt(this),t),t)}):this.each(function(){var t,i,o,a;if(r){i=0,o=w(this),a=xt(e);while(t=a[i++])o.hasClass(t)?o.removeClass(t):o.addClass(t)}else void 0!==e&&"boolean"!==n||((t=mt(this))&&J.set(this,"__className__",t),this.setAttribute&&this.setAttribute("class",t||!1===e?"":J.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&(" "+vt(mt(n))+" ").indexOf(t)>-1)return!0;return!1}});var bt=/\r/g;w.fn.extend({val:function(e){var t,n,r,i=this[0];{if(arguments.length)return r=g(e),this.each(function(n){var i;1===this.nodeType&&(null==(i=r?e.call(this,n,w(this).val()):e)?i="":"number"==typeof i?i+="":Array.isArray(i)&&(i=w.map(i,function(e){return null==e?"":e+""})),(t=w.valHooks[this.type]||w.valHooks[this.nodeName.toLowerCase()])&&"set"in t&&void 0!==t.set(this,i,"value")||(this.value=i))});if(i)return(t=w.valHooks[i.type]||w.valHooks[i.nodeName.toLowerCase()])&&"get"in t&&void 0!==(n=t.get(i,"value"))?n:"string"==typeof(n=i.value)?n.replace(bt,""):null==n?"":n}}}),w.extend({valHooks:{option:{get:function(e){var t=w.find.attr(e,"value");return null!=t?t:vt(w.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!N(n.parentNode,"optgroup"))){if(t=w(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=w.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=w.inArray(w.valHooks.option.get(r),o)>-1)&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),w.each(["radio","checkbox"],function(){w.valHooks[this]={set:function(e,t){if(Array.isArray(t))return 
e.checked=w.inArray(w(e).val(),t)>-1}},h.checkOn||(w.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),h.focusin="onfocusin"in e;var wt=/^(?:focusinfocus|focusoutblur)$/,Tt=function(e){e.stopPropagation()};w.extend(w.event,{trigger:function(t,n,i,o){var a,s,u,l,c,p,d,h,v=[i||r],m=f.call(t,"type")?t.type:t,x=f.call(t,"namespace")?t.namespace.split("."):[];if(s=h=u=i=i||r,3!==i.nodeType&&8!==i.nodeType&&!wt.test(m+w.event.triggered)&&(m.indexOf(".")>-1&&(m=(x=m.split(".")).shift(),x.sort()),c=m.indexOf(":")<0&&"on"+m,t=t[w.expando]?t:new w.Event(m,"object"==typeof t&&t),t.isTrigger=o?2:3,t.namespace=x.join("."),t.rnamespace=t.namespace?new RegExp("(^|\\.)"+x.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,t.result=void 0,t.target||(t.target=i),n=null==n?[t]:w.makeArray(n,[t]),d=w.event.special[m]||{},o||!d.trigger||!1!==d.trigger.apply(i,n))){if(!o&&!d.noBubble&&!y(i)){for(l=d.delegateType||m,wt.test(l+m)||(s=s.parentNode);s;s=s.parentNode)v.push(s),u=s;u===(i.ownerDocument||r)&&v.push(u.defaultView||u.parentWindow||e)}a=0;while((s=v[a++])&&!t.isPropagationStopped())h=s,t.type=a>1?l:d.bindType||m,(p=(J.get(s,"events")||{})[t.type]&&J.get(s,"handle"))&&p.apply(s,n),(p=c&&s[c])&&p.apply&&Y(s)&&(t.result=p.apply(s,n),!1===t.result&&t.preventDefault());return t.type=m,o||t.isDefaultPrevented()||d._default&&!1!==d._default.apply(v.pop(),n)||!Y(i)||c&&g(i[m])&&!y(i)&&((u=i[c])&&(i[c]=null),w.event.triggered=m,t.isPropagationStopped()&&h.addEventListener(m,Tt),i[m](),t.isPropagationStopped()&&h.removeEventListener(m,Tt),w.event.triggered=void 0,u&&(i[c]=u)),t.result}},simulate:function(e,t,n){var r=w.extend(new w.Event,n,{type:e,isSimulated:!0});w.event.trigger(r,null,t)}}),w.fn.extend({trigger:function(e,t){return this.each(function(){w.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return w.event.trigger(e,t,n,!0)}}),h.focusin||w.each({focus:"focusin",blur:"focusout"},function(e,t){var n=function(e){w.event.simulate(t,e.target,w.event.fix(e))};w.event.special[t]={setup:function(){var r=this.ownerDocument||this,i=J.access(r,t);i||r.addEventListener(e,n,!0),J.access(r,t,(i||0)+1)},teardown:function(){var r=this.ownerDocument||this,i=J.access(r,t)-1;i?J.access(r,t,i):(r.removeEventListener(e,n,!0),J.remove(r,t))}}});var Ct=e.location,Et=Date.now(),kt=/\?/;w.parseXML=function(t){var n;if(!t||"string"!=typeof t)return null;try{n=(new e.DOMParser).parseFromString(t,"text/xml")}catch(e){n=void 0}return n&&!n.getElementsByTagName("parsererror").length||w.error("Invalid XML: "+t),n};var St=/\[\]$/,Dt=/\r?\n/g,Nt=/^(?:submit|button|image|reset|file)$/i,At=/^(?:input|select|textarea|keygen)/i;function jt(e,t,n,r){var i;if(Array.isArray(t))w.each(t,function(t,i){n||St.test(e)?r(e,i):jt(e+"["+("object"==typeof i&&null!=i?t:"")+"]",i,n,r)});else if(n||"object"!==x(t))r(e,t);else for(i in t)jt(e+"["+i+"]",t[i],n,r)}w.param=function(e,t){var n,r=[],i=function(e,t){var n=g(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(Array.isArray(e)||e.jquery&&!w.isPlainObject(e))w.each(e,function(){i(this.name,this.value)});else for(n in e)jt(n,e[n],t,i);return r.join("&")},w.fn.extend({serialize:function(){return w.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=w.prop(this,"elements");return e?w.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!w(this).is(":disabled")&&At.test(this.nodeName)&&!Nt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var 
n=w(this).val();return null==n?null:Array.isArray(n)?w.map(n,function(e){return{name:t.name,value:e.replace(Dt,"\r\n")}}):{name:t.name,value:n.replace(Dt,"\r\n")}}).get()}});var qt=/%20/g,Lt=/#.*$/,Ht=/([?&])_=[^&]*/,Ot=/^(.*?):[ \t]*([^\r\n]*)$/gm,Pt=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Mt=/^(?:GET|HEAD)$/,Rt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Bt=r.createElement("a");Bt.href=Ct.href;function Ft(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(M)||[];if(g(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function _t(e,t,n,r){var i={},o=e===Wt;function a(s){var u;return i[s]=!0,w.each(e[s]||[],function(e,s){var l=s(t,n,r);return"string"!=typeof l||o||i[l]?o?!(u=l):void 0:(t.dataTypes.unshift(l),a(l),!1)}),u}return a(t.dataTypes[0])||!i["*"]&&a("*")}function zt(e,t){var n,r,i=w.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&w.extend(!0,e,r),e}function Xt(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}function Ut(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}w.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Ct.href,type:"GET",isLocal:Pt.test(Ct.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":w.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,w.ajaxSettings),t):zt(w.ajaxSettings,e)},ajaxPrefilter:Ft(It),ajaxTransport:Ft(Wt),ajax:function(t,n){"object"==typeof t&&(n=t,t=void 0),n=n||{};var i,o,a,s,u,l,c,f,p,d,h=w.ajaxSetup({},n),g=h.context||h,y=h.context&&(g.nodeType||g.jquery)?w(g):w.event,v=w.Deferred(),m=w.Callbacks("once memory"),x=h.statusCode||{},b={},T={},C="canceled",E={readyState:0,getResponseHeader:function(e){var t;if(c){if(!s){s={};while(t=Ot.exec(a))s[t[1].toLowerCase()]=t[2]}t=s[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return c?a:null},setRequestHeader:function(e,t){return null==c&&(e=T[e.toLowerCase()]=T[e.toLowerCase()]||e,b[e]=t),this},overrideMimeType:function(e){return null==c&&(h.mimeType=e),this},statusCode:function(e){var t;if(e)if(c)E.always(e[E.status]);else for(t in e)x[t]=[x[t],e[t]];return this},abort:function(e){var t=e||C;return 
i&&i.abort(t),k(0,t),this}};if(v.promise(E),h.url=((t||h.url||Ct.href)+"").replace(Rt,Ct.protocol+"//"),h.type=n.method||n.type||h.method||h.type,h.dataTypes=(h.dataType||"*").toLowerCase().match(M)||[""],null==h.crossDomain){l=r.createElement("a");try{l.href=h.url,l.href=l.href,h.crossDomain=Bt.protocol+"//"+Bt.host!=l.protocol+"//"+l.host}catch(e){h.crossDomain=!0}}if(h.data&&h.processData&&"string"!=typeof h.data&&(h.data=w.param(h.data,h.traditional)),_t(It,h,n,E),c)return E;(f=w.event&&h.global)&&0==w.active++&&w.event.trigger("ajaxStart"),h.type=h.type.toUpperCase(),h.hasContent=!Mt.test(h.type),o=h.url.replace(Lt,""),h.hasContent?h.data&&h.processData&&0===(h.contentType||"").indexOf("application/x-www-form-urlencoded")&&(h.data=h.data.replace(qt,"+")):(d=h.url.slice(o.length),h.data&&(h.processData||"string"==typeof h.data)&&(o+=(kt.test(o)?"&":"?")+h.data,delete h.data),!1===h.cache&&(o=o.replace(Ht,"$1"),d=(kt.test(o)?"&":"?")+"_="+Et+++d),h.url=o+d),h.ifModified&&(w.lastModified[o]&&E.setRequestHeader("If-Modified-Since",w.lastModified[o]),w.etag[o]&&E.setRequestHeader("If-None-Match",w.etag[o])),(h.data&&h.hasContent&&!1!==h.contentType||n.contentType)&&E.setRequestHeader("Content-Type",h.contentType),E.setRequestHeader("Accept",h.dataTypes[0]&&h.accepts[h.dataTypes[0]]?h.accepts[h.dataTypes[0]]+("*"!==h.dataTypes[0]?", "+$t+"; q=0.01":""):h.accepts["*"]);for(p in h.headers)E.setRequestHeader(p,h.headers[p]);if(h.beforeSend&&(!1===h.beforeSend.call(g,E,h)||c))return E.abort();if(C="abort",m.add(h.complete),E.done(h.success),E.fail(h.error),i=_t(Wt,h,n,E)){if(E.readyState=1,f&&y.trigger("ajaxSend",[E,h]),c)return E;h.async&&h.timeout>0&&(u=e.setTimeout(function(){E.abort("timeout")},h.timeout));try{c=!1,i.send(b,k)}catch(e){if(c)throw e;k(-1,e)}}else k(-1,"No Transport");function k(t,n,r,s){var l,p,d,b,T,C=n;c||(c=!0,u&&e.clearTimeout(u),i=void 0,a=s||"",E.readyState=t>0?4:0,l=t>=200&&t<300||304===t,r&&(b=Xt(h,E,r)),b=Ut(h,b,E,l),l?(h.ifModified&&((T=E.getResponseHeader("Last-Modified"))&&(w.lastModified[o]=T),(T=E.getResponseHeader("etag"))&&(w.etag[o]=T)),204===t||"HEAD"===h.type?C="nocontent":304===t?C="notmodified":(C=b.state,p=b.data,l=!(d=b.error))):(d=C,!t&&C||(C="error",t<0&&(t=0))),E.status=t,E.statusText=(n||C)+"",l?v.resolveWith(g,[p,C,E]):v.rejectWith(g,[E,C,d]),E.statusCode(x),x=void 0,f&&y.trigger(l?"ajaxSuccess":"ajaxError",[E,h,l?p:d]),m.fireWith(g,[E,C]),f&&(y.trigger("ajaxComplete",[E,h]),--w.active||w.event.trigger("ajaxStop")))}return E},getJSON:function(e,t,n){return w.get(e,t,n,"json")},getScript:function(e,t){return w.get(e,void 0,t,"script")}}),w.each(["get","post"],function(e,t){w[t]=function(e,n,r,i){return g(n)&&(i=i||r,r=n,n=void 0),w.ajax(w.extend({url:e,type:t,dataType:i,data:n,success:r},w.isPlainObject(e)&&e))}}),w._evalUrl=function(e){return w.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},w.fn.extend({wrapAll:function(e){var t;return this[0]&&(g(e)&&(e=e.call(this[0])),t=w(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(e){return g(e)?this.each(function(t){w(this).wrapInner(e.call(this,t))}):this.each(function(){var t=w(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=g(e);return this.each(function(n){w(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(e){return 
this.parent(e).not("body").each(function(){w(this).replaceWith(this.childNodes)}),this}}),w.expr.pseudos.hidden=function(e){return!w.expr.pseudos.visible(e)},w.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},w.ajaxSettings.xhr=function(){try{return new e.XMLHttpRequest}catch(e){}};var Vt={0:200,1223:204},Gt=w.ajaxSettings.xhr();h.cors=!!Gt&&"withCredentials"in Gt,h.ajax=Gt=!!Gt,w.ajaxTransport(function(t){var n,r;if(h.cors||Gt&&!t.crossDomain)return{send:function(i,o){var a,s=t.xhr();if(s.open(t.type,t.url,t.async,t.username,t.password),t.xhrFields)for(a in t.xhrFields)s[a]=t.xhrFields[a];t.mimeType&&s.overrideMimeType&&s.overrideMimeType(t.mimeType),t.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");for(a in i)s.setRequestHeader(a,i[a]);n=function(e){return function(){n&&(n=r=s.onload=s.onerror=s.onabort=s.ontimeout=s.onreadystatechange=null,"abort"===e?s.abort():"error"===e?"number"!=typeof s.status?o(0,"error"):o(s.status,s.statusText):o(Vt[s.status]||s.status,s.statusText,"text"!==(s.responseType||"text")||"string"!=typeof s.responseText?{binary:s.response}:{text:s.responseText},s.getAllResponseHeaders()))}},s.onload=n(),r=s.onerror=s.ontimeout=n("error"),void 0!==s.onabort?s.onabort=r:s.onreadystatechange=function(){4===s.readyState&&e.setTimeout(function(){n&&r()})},n=n("abort");try{s.send(t.hasContent&&t.data||null)}catch(e){if(n)throw e}},abort:function(){n&&n()}}}),w.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),w.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return w.globalEval(e),e}}}),w.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),w.ajaxTransport("script",function(e){if(e.crossDomain){var t,n;return{send:function(i,o){t=w("<script>").prop({charset:e.scriptCharset,src:e.url}).on("load error",n=function(e){t.remove(),n=null,e&&o("error"===e.type?404:200,e.type)}),r.head.appendChild(t[0])},abort:function(){n&&n()}}}});var Yt=[],Qt=/(=)\?(?=&|$)|\?\?/;w.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Yt.pop()||w.expando+"_"+Et++;return this[e]=!0,e}}),w.ajaxPrefilter("json jsonp",function(t,n,r){var i,o,a,s=!1!==t.jsonp&&(Qt.test(t.url)?"url":"string"==typeof t.data&&0===(t.contentType||"").indexOf("application/x-www-form-urlencoded")&&Qt.test(t.data)&&"data");if(s||"jsonp"===t.dataTypes[0])return i=t.jsonpCallback=g(t.jsonpCallback)?t.jsonpCallback():t.jsonpCallback,s?t[s]=t[s].replace(Qt,"$1"+i):!1!==t.jsonp&&(t.url+=(kt.test(t.url)?"&":"?")+t.jsonp+"="+i),t.converters["script json"]=function(){return a||w.error(i+" was not called"),a[0]},t.dataTypes[0]="json",o=e[i],e[i]=function(){a=arguments},r.always(function(){void 0===o?w(e).removeProp(i):e[i]=o,t[i]&&(t.jsonpCallback=n.jsonpCallback,Yt.push(i)),a&&g(o)&&o(a[0]),a=o=void 0}),"script"}),h.createHTMLDocument=function(){var e=r.implementation.createHTMLDocument("").body;return e.innerHTML="<form></form><form></form>",2===e.childNodes.length}(),w.parseHTML=function(e,t,n){if("string"!=typeof e)return[];"boolean"==typeof t&&(n=t,t=!1);var i,o,a;return 
t||(h.createHTMLDocument?((i=(t=r.implementation.createHTMLDocument("")).createElement("base")).href=r.location.href,t.head.appendChild(i)):t=r),o=A.exec(e),a=!n&&[],o?[t.createElement(o[1])]:(o=xe([e],t,a),a&&a.length&&w(a).remove(),w.merge([],o.childNodes))},w.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return s>-1&&(r=vt(e.slice(s)),e=e.slice(0,s)),g(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),a.length>0&&w.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?w("<div>").append(w.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},w.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){w.fn[t]=function(e){return this.on(t,e)}}),w.expr.pseudos.animated=function(e){return w.grep(w.timers,function(t){return e===t.elem}).length},w.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l,c=w.css(e,"position"),f=w(e),p={};"static"===c&&(e.style.position="relative"),s=f.offset(),o=w.css(e,"top"),u=w.css(e,"left"),(l=("absolute"===c||"fixed"===c)&&(o+u).indexOf("auto")>-1)?(a=(r=f.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),g(t)&&(t=t.call(e,n,w.extend({},s))),null!=t.top&&(p.top=t.top-s.top+a),null!=t.left&&(p.left=t.left-s.left+i),"using"in t?t.using.call(e,p):f.css(p)}},w.fn.extend({offset:function(e){if(arguments.length)return void 0===e?this:this.each(function(t){w.offset.setOffset(this,e,t)});var t,n,r=this[0];if(r)return r.getClientRects().length?(t=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:t.top+n.pageYOffset,left:t.left+n.pageXOffset}):{top:0,left:0}},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===w.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===w.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=w(e).offset()).top+=w.css(e,"borderTopWidth",!0),i.left+=w.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-w.css(r,"marginTop",!0),left:t.left-i.left-w.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===w.css(e,"position"))e=e.offsetParent;return e||be})}}),w.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,t){var n="pageYOffset"===t;w.fn[e]=function(r){return z(this,function(e,r,i){var o;if(y(e)?o=e:9===e.nodeType&&(o=e.defaultView),void 0===i)return o?o[t]:e[r];o?o.scrollTo(n?o.pageXOffset:i,n?i:o.pageYOffset):e[r]=i},e,r,arguments.length)}}),w.each(["top","left"],function(e,t){w.cssHooks[t]=_e(h.pixelPosition,function(e,n){if(n)return n=Fe(e,t),We.test(n)?w(e).position()[t]+"px":n})}),w.each({Height:"height",Width:"width"},function(e,t){w.each({padding:"inner"+e,content:t,"":"outer"+e},function(n,r){w.fn[r]=function(i,o){var a=arguments.length&&(n||"boolean"!=typeof i),s=n||(!0===i||!0===o?"margin":"border");return z(this,function(t,n,i){var o;return y(t)?0===r.indexOf("outer")?t["inner"+e]:t.document.documentElement["client"+e]:9===t.nodeType?(o=t.documentElement,Math.max(t.body["scroll"+e],o["scroll"+e],t.body["offset"+e],o["offset"+e],o["client"+e])):void 0===i?w.css(t,n,s):w.style(t,n,i,s)},t,a?i:void 0,a)}})}),w.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" 
"),function(e,t){w.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),w.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),w.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),w.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),g(e))return r=o.call(arguments,2),i=function(){return e.apply(t||this,r.concat(o.call(arguments)))},i.guid=e.guid=e.guid||w.guid++,i},w.holdReady=function(e){e?w.readyWait++:w.ready(!0)},w.isArray=Array.isArray,w.parseJSON=JSON.parse,w.nodeName=N,w.isFunction=g,w.isWindow=y,w.camelCase=G,w.type=x,w.now=Date.now,w.isNumeric=function(e){var t=w.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return w});var Jt=e.jQuery,Kt=e.$;return w.noConflict=function(t){return e.$===w&&(e.$=Kt),t&&e.jQuery===w&&(e.jQuery=Jt),w},t||(e.jQuery=e.$=w),w}); diff --git a/old/server/app/static/js/vendor/nanobar.min.js b/old/server/app/static/js/vendor/nanobar.min.js deleted file mode 100644 index 9329eadc..00000000 --- a/old/server/app/static/js/vendor/nanobar.min.js +++ /dev/null @@ -1,3 +0,0 @@ -var Nanobar=function(){var c,d,e,f,g,h,k={width:"100%",height:"4px",zIndex:9999,top:"0"},l={width:0,height:"100%",clear:"both",transition:"height .3s"};c=function(a,b){for(var c in b)a.style[c]=b[c];a.style["float"]="left"};f=function(){var a=this,b=this.width-this.here;0.1>b&&-0.1<b?(g.call(this,this.here),this.moving=!1,100==this.width&&(this.el.style.height=0,setTimeout(function(){a.cont.el.removeChild(a.el)},300))):(g.call(this,this.width-b/4),setTimeout(function(){a.go()},16))};g=function(a){this.width= -a;this.el.style.width=this.width+"%"};h=function(){var a=new d(this);this.bars.unshift(a)};d=function(a){this.el=document.createElement("div");this.el.style.backgroundColor=a.opts.bg;this.here=this.width=0;this.moving=!1;this.cont=a;c(this.el,l);a.el.appendChild(this.el)};d.prototype.go=function(a){a?(this.here=a,this.moving||(this.moving=!0,f.call(this))):this.moving&&f.call(this)};e=function(a){a=this.opts=a||{};var b;a.bg=a.bg||"#000";this.bars=[];b=this.el=document.createElement("div");c(this.el, -k);a.id&&(b.id=a.id);b.style.position=a.target?"relative":"fixed";a.target?a.target.insertBefore(b,a.target.firstChild):document.getElementsByTagName("body")[0].appendChild(b);h.call(this)};e.prototype.go=function(a){this.bars[0].go(a);100==a&&h.call(this)};return e}();
\ No newline at end of file diff --git a/old/server/app/static/js/vendor/prefixfree.js b/old/server/app/static/js/vendor/prefixfree.js deleted file mode 100644 index 40b6e693..00000000 --- a/old/server/app/static/js/vendor/prefixfree.js +++ /dev/null @@ -1,527 +0,0 @@ -/** - * StyleFix 1.0.3 & PrefixFree 1.0.7 - * @author Lea Verou - * MIT license - */ - -(function(){ - -if(!window.addEventListener) { - return; -} - -var self = window.StyleFix = { - link: function(link) { - var url = link.href || link.getAttribute('data-href'); - try { - // Ignore stylesheets with data-noprefix attribute as well as alternate stylesheets or without (data-)href attribute - if(!url || link.rel !== 'stylesheet' || link.hasAttribute('data-noprefix')) { - return; - } - } - catch(e) { - return; - } - - var base = url.replace(/[^\/]+$/, ''), - base_scheme = (/^[a-z]{3,10}:/.exec(base) || [''])[0], - base_domain = (/^[a-z]{3,10}:\/\/[^\/]+/.exec(base) || [''])[0], - base_query = /^([^?]*)\??/.exec(url)[1], - parent = link.parentNode, - xhr = new XMLHttpRequest(), - process; - - xhr.onreadystatechange = function() { - if(xhr.readyState === 4) { - process(); - } - }; - - process = function() { - var css = xhr.responseText; - - if(css && link.parentNode && (!xhr.status || xhr.status < 400 || xhr.status > 600)) { - css = self.fix(css, true, link); - - // Convert relative URLs to absolute, if needed - if(css && base) { - css = css.replace(/url\(\s*?((?:"|')?)(.+?)\1\s*?\)/gi, function($0, quote, url) { - if(/^([a-z]{3,10}:|#)/i.test(url)) { // Absolute & or hash-relative - return $0; - } - else if(/^\/\//.test(url)) { // Scheme-relative - // May contain sequences like /../ and /./ but those DO work - return 'url("' + base_scheme + url + '")'; - } - else if(/^\//.test(url)) { // Domain-relative - return 'url("' + base_domain + url + '")'; - } - else if(/^\?/.test(url)) { // Query-relative - return 'url("' + base_query + url + '")'; - } - else { - // Path-relative - return 'url("' + base + url + '")'; - } - }); - - // behavior URLs shoudn’t be converted (Issue #19) - // base should be escaped before added to RegExp (Issue #81) - var escaped_base = base.replace(/([\\\^\$*+[\]?{}.=!:(|)])/g,"\\$1"); - css = css.replace(RegExp('\\b(behavior:\\s*?url\\(\'?"?)' + escaped_base, 'gi'), '$1'); - } - - var style = document.createElement('style'); - style.textContent = '/*# sourceURL='+link.getAttribute('href')+' */\n/*@ sourceURL='+link.getAttribute('href')+' */\n' + css; - style.media = link.media; - style.disabled = link.disabled; - style.setAttribute('data-href', link.getAttribute('href')); - - if(link.id) style.id = link.id; - - parent.insertBefore(style, link); - parent.removeChild(link); - - style.media = link.media; // Duplicate is intentional. 
See issue #31 - } - }; - - try { - xhr.open('GET', url); - xhr.send(null); - } catch (e) { - // Fallback to XDomainRequest if available - if (typeof XDomainRequest != "undefined") { - xhr = new XDomainRequest(); - xhr.onerror = xhr.onprogress = function() {}; - xhr.onload = process; - xhr.open("GET", url); - xhr.send(null); - } - } - - link.setAttribute('data-inprogress', ''); - }, - - styleElement: function(style) { - if (style.hasAttribute('data-noprefix')) { - return; - } - var disabled = style.disabled; - - style.textContent = self.fix(style.textContent, true, style); - - style.disabled = disabled; - }, - - styleAttribute: function(element) { - var css = element.getAttribute('style'); - - css = self.fix(css, false, element); - - element.setAttribute('style', css); - }, - - process: function() { - // Linked stylesheets - $('link[rel="stylesheet"]:not([data-inprogress])').forEach(StyleFix.link); - - // Inline stylesheets - $('style').forEach(StyleFix.styleElement); - - // Inline styles - $('[style]').forEach(StyleFix.styleAttribute); - - var event = document.createEvent('Event'); - event.initEvent('StyleFixProcessed', true, true); - document.dispatchEvent(event); - - }, - - register: function(fixer, index) { - (self.fixers = self.fixers || []) - .splice(index === undefined? self.fixers.length : index, 0, fixer); - }, - - fix: function(css, raw, element) { - if(self.fixers) { - for(var i=0; i<self.fixers.length; i++) { - css = self.fixers[i](css, raw, element) || css; - } - } - - return css; - }, - - camelCase: function(str) { - return str.replace(/-([a-z])/g, function($0, $1) { return $1.toUpperCase(); }).replace('-',''); - }, - - deCamelCase: function(str) { - return str.replace(/[A-Z]/g, function($0) { return '-' + $0.toLowerCase() }); - } -}; - -/************************************** - * Process styles - **************************************/ -(function(){ - setTimeout(function(){ - $('link[rel="stylesheet"]').forEach(StyleFix.link); - }, 10); - - document.addEventListener('DOMContentLoaded', StyleFix.process, false); -})(); - -function $(expr, con) { - return [].slice.call((con || document).querySelectorAll(expr)); -} - -})(); - -/** - * PrefixFree - */ -(function(root){ - -if(!window.StyleFix || !window.getComputedStyle) { - return; -} - -// Private helper -function fix(what, before, after, replacement, css) { - what = self[what]; - - if(what.length) { - var regex = RegExp(before + '(' + what.join('|') + ')' + after, 'gi'); - - css = css.replace(regex, replacement); - } - - return css; -} - -var self = window.PrefixFree = { - prefixCSS: function(css, raw, element) { - var prefix = self.prefix; - - // Gradient angles hotfix - if(self.functions.indexOf('linear-gradient') > -1) { - // Gradients are supported with a prefix, convert angles to legacy - css = css.replace(/(\s|:|,)(repeating-)?linear-gradient\(\s*(-?\d*\.?\d*)deg/ig, function ($0, delim, repeating, deg) { - return delim + (repeating || '') + 'linear-gradient(' + (90-deg) + 'deg'; - }); - } - - css = fix('functions', '(\\s|:|,)', '\\s*\\(', '$1' + prefix + '$2(', css); - css = fix('keywords', '(\\s|:)', '(\\s|;|\\}|$)', '$1' + prefix + '$2$3', css); - css = fix('properties', '(^|\\{|\\s|;)', '\\s*:', '$1' + prefix + '$2:', css); - - // Prefix properties *inside* values (issue #8) - if (self.properties.length) { - var regex = RegExp('\\b(' + self.properties.join('|') + ')(?!:)', 'gi'); - - css = fix('valueProperties', '\\b', ':(.+?);', function($0) { - return $0.replace(regex, prefix + "$1") - }, css); - } - - if(raw) { - 
css = fix('selectors', '', '\\b', self.prefixSelector, css); - css = fix('atrules', '@', '\\b', '@' + prefix + '$1', css); - } - - // Fix double prefixing - css = css.replace(RegExp('-' + prefix, 'g'), '-'); - - // Prefix wildcard - css = css.replace(/-\*-(?=[a-z]+)/gi, self.prefix); - - return css; - }, - - property: function(property) { - return (self.properties.indexOf(property) >=0 ? self.prefix : '') + property; - }, - - value: function(value, property) { - value = fix('functions', '(^|\\s|,)', '\\s*\\(', '$1' + self.prefix + '$2(', value); - value = fix('keywords', '(^|\\s)', '(\\s|$)', '$1' + self.prefix + '$2$3', value); - - if(self.valueProperties.indexOf(property) >= 0) { - value = fix('properties', '(^|\\s|,)', '($|\\s|,)', '$1'+self.prefix+'$2$3', value); - } - - return value; - }, - - prefixSelector: function(selector) { - return self.selectorMap[selector] || selector - }, - - // Warning: Prefixes no matter what, even if the property is supported prefix-less - prefixProperty: function(property, camelCase) { - var prefixed = self.prefix + property; - - return camelCase? StyleFix.camelCase(prefixed) : prefixed; - } -}; - -/************************************** - * Properties - **************************************/ -(function() { - var prefixes = {}, - properties = [], - shorthands = {}, - style = getComputedStyle(document.documentElement, null), - dummy = document.createElement('div').style; - - // Why are we doing this instead of iterating over properties in a .style object? Because Webkit. - // 1. Older Webkit won't iterate over those. - // 2. Recent Webkit will, but the 'Webkit'-prefixed properties are not enumerable. The 'webkit' - // (lower case 'w') ones are, but they don't `deCamelCase()` into a prefix that we can detect. - - var iterate = function(property) { - if(property.charAt(0) === '-') { - properties.push(property); - - var parts = property.split('-'), - prefix = parts[1]; - - // Count prefix uses - prefixes[prefix] = ++prefixes[prefix] || 1; - - // This helps determining shorthands - while(parts.length > 3) { - parts.pop(); - - var shorthand = parts.join('-'); - - if(supported(shorthand) && properties.indexOf(shorthand) === -1) { - properties.push(shorthand); - } - } - } - }, - supported = function(property) { - return StyleFix.camelCase(property) in dummy; - } - - // Some browsers have numerical indices for the properties, some don't - if(style && style.length > 0) { - for(var i=0; i<style.length; i++) { - iterate(style[i]) - } - } - else { - for(var property in style) { - iterate(StyleFix.deCamelCase(property)); - } - } - - // Find most frequently used prefix - var highest = {uses:0}; - for(var prefix in prefixes) { - var uses = prefixes[prefix]; - - if(highest.uses < uses) { - highest = {prefix: prefix, uses: uses}; - } - } - - self.prefix = '-' + highest.prefix + '-'; - self.Prefix = StyleFix.camelCase(self.prefix); - - self.properties = []; - - // Get properties ONLY supported with a prefix - for(var i=0; i<properties.length; i++) { - var property = properties[i]; - - if(property.indexOf(self.prefix) === 0) { // we might have multiple prefixes, like Opera - var unprefixed = property.slice(self.prefix.length); - - if(!supported(unprefixed)) { - self.properties.push(unprefixed); - } - } - } - - // IE fix - if(self.Prefix == 'Ms' - && !('transform' in dummy) - && !('MsTransform' in dummy) - && ('msTransform' in dummy)) { - self.properties.push('transform', 'transform-origin'); - } - - self.properties.sort(); -})(); - -/************************************** - 
* Values - **************************************/ -(function() { -// Values that might need prefixing -var functions = { - 'linear-gradient': { - property: 'backgroundImage', - params: 'red, teal' - }, - 'calc': { - property: 'width', - params: '1px + 5%' - }, - 'element': { - property: 'backgroundImage', - params: '#foo' - }, - 'cross-fade': { - property: 'backgroundImage', - params: 'url(a.png), url(b.png), 50%' - }, - 'image-set': { - property: 'backgroundImage', - params: 'url(a.png) 1x, url(b.png) 2x' - } -}; - - -functions['repeating-linear-gradient'] = -functions['repeating-radial-gradient'] = -functions['radial-gradient'] = -functions['linear-gradient']; - -// Note: The properties assigned are just to *test* support. -// The keywords will be prefixed everywhere. -var keywords = { - 'initial': 'color', - 'grab': 'cursor', - 'grabbing': 'cursor', - 'zoom-in': 'cursor', - 'zoom-out': 'cursor', - 'box': 'display', - 'flexbox': 'display', - 'inline-flexbox': 'display', - 'flex': 'display', - 'inline-flex': 'display', - 'grid': 'display', - 'inline-grid': 'display', - 'max-content': 'width', - 'min-content': 'width', - 'fit-content': 'width', - 'fill-available': 'width', - 'contain-floats': 'width' -}; - -self.functions = []; -self.keywords = []; - -var style = document.createElement('div').style; - -function supported(value, property) { - style[property] = ''; - style[property] = value; - - return !!style[property]; -} - -for (var func in functions) { - var test = functions[func], - property = test.property, - value = func + '(' + test.params + ')'; - - if (!supported(value, property) - && supported(self.prefix + value, property)) { - // It's supported, but with a prefix - self.functions.push(func); - } -} - -for (var keyword in keywords) { - var property = keywords[keyword]; - - if (!supported(keyword, property) - && supported(self.prefix + keyword, property)) { - // It's supported, but with a prefix - self.keywords.push(keyword); - } -} - -})(); - -/************************************** - * Selectors and @-rules - **************************************/ -(function() { - -var -selectors = { - ':any-link': null, - '::backdrop': null, - ':fullscreen': null, - ':full-screen': ':fullscreen', - //sigh - '::placeholder': null, - ':placeholder': '::placeholder', - '::input-placeholder': '::placeholder', - ':input-placeholder': '::placeholder', - ':read-only': null, - ':read-write': null, - '::selection': null -}, - -atrules = { - 'keyframes': 'name', - 'viewport': null, - 'document': 'regexp(".")' -}; - -self.selectors = []; -self.selectorMap = {}; -self.atrules = []; - -var style = root.appendChild(document.createElement('style')); - -function supported(selector) { - style.textContent = selector + '{}'; // Safari 4 has issues with style.innerHTML - - return !!style.sheet.cssRules.length; -} - -for(var selector in selectors) { - var standard = selectors[selector] || selector - var prefixed = selector.replace(/::?/, function($0) { return $0 + self.prefix }) - if(!supported(standard) && supported(prefixed)) { - self.selectors.push(standard); - self.selectorMap[standard] = prefixed; - } -} - -for(var atrule in atrules) { - var test = atrule + ' ' + (atrules[atrule] || ''); - - if(!supported('@' + test) && supported('@' + self.prefix + test)) { - self.atrules.push(atrule); - } -} - -root.removeChild(style); - -})(); - -// Properties that accept properties as their value -self.valueProperties = [ - 'transition', - 'transition-property', - 'will-change' -] - -// Add class for current prefix 
-root.className += ' ' + self.prefix; - -StyleFix.register(self.prefixCSS); - - -})(document.documentElement); diff --git a/old/server/app/templates/403.html b/old/server/app/templates/403.html deleted file mode 100644 index f83c6dfc..00000000 --- a/old/server/app/templates/403.html +++ /dev/null @@ -1,35 +0,0 @@ -{%- extends "base.html" %} - -{% import "bootstrap/utils.html" as utils %} - -{% block content %} -<div class="container"> - - <div class="row"> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1> - <h2 class="subtitle">403</h2> - </div> - - - <div class="row"> - <div class="col-sm-12 align_center"> - <p>Your request could not be handled</p> - </div> - </div> - - <div id="about_btn" class="row"> - <div class="col-sm-12"> - <div class="align_center"> - <a class="btn btn-sm btn-default" href="/" role="button">Home</a> - </div> - </div> - </div> - -</div> - -{% block footer %} -{{super()}} -{% endblock %} - -{% endblock %} - diff --git a/old/server/app/templates/404.html b/old/server/app/templates/404.html deleted file mode 100644 index a7f7d45a..00000000 --- a/old/server/app/templates/404.html +++ /dev/null @@ -1,33 +0,0 @@ -{%- extends "base.html" %} - -{% import "bootstrap/utils.html" as utils %} - -{% block content %} -<div class="container"> - - <div class="row"> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1> - <h2 class="subtitle">404</h2> - </div> - - <div class="row"> - <div class="col-sm-12 align_center"> - <p>Your request could not be handled</p> - </div> - - <div id="about_btn" class="row"> - <div class="col-sm-12"> - <div class="align_center"> - <a class="btn btn-sm btn-default" href="/" role="button">Home</a> - </div> - </div> - </div> - -</div> - -{% block footer %} -{{super()}} -{% endblock %} - -{% endblock %} - diff --git a/old/server/app/templates/500.html b/old/server/app/templates/500.html deleted file mode 100644 index b323c12c..00000000 --- a/old/server/app/templates/500.html +++ /dev/null @@ -1,34 +0,0 @@ -{%- extends "base.html" %} - -{% import "bootstrap/utils.html" as utils %} - -{% block content %} -<div class="container"> - - <div class="row"> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1> - <h2 class="subtitle">505</h2> - </div> - - <div class="row"> - <div class="col-sm-12 align_center"> - <p>Your request could not be handled</p> - </div> - </div> - - <div id="about_btn" class="row"> - <div class="col-sm-12"> - <div class="align_center"> - <a class="btn btn-sm btn-default" href="/" role="button">Home</a> - </div> - </div> - </div> - -</div> - -{% block footer %} -{{super()}} -{% endblock %} - -{% endblock %} - diff --git a/old/server/app/templates/base.html b/old/server/app/templates/base.html deleted file mode 100644 index 16402af8..00000000 --- a/old/server/app/templates/base.html +++ /dev/null @@ -1,33 +0,0 @@ -{%- extends "bootstrap/base.html" %} - -{% block title %}DullDream (v2 x ZkM){% endblock %} - -{% block head %} - {{super()}} - <link rel="shortcut icon" href="{{url_for('static', filename='img/favicon.ico')}}"> -{% endblock %} - -{% block styles %} - {{super()}} - <link rel="stylesheet" type="text/css" href="{{url_for('static', filename='css/bootstrap.min.css')}}"> - <link rel="stylesheet" type="text/css" href="{{url_for('static', filename='css/dullbrown-theme.css')}}"> -{% endblock %} - -{%- block content %} - {{super()}} -{% endblock content %} - -{%- block footer %} - <div id="footer" class="footer"> - <div class="container"> - <p class=""> - DullDream™ (beta) by 
<a href="http://constantdullaart.com">Constant Dullaart</a>. - Made in collaboration with <a href="http://ahprojects.com">Adam Harvey</a> - </p> - </div> - </div> -{% endblock footer %} - -{% block scripts %} - {{super()}} -{% endblock scripts %}
\ No newline at end of file diff --git a/old/server/app/templates/celery.html b/old/server/app/templates/celery.html deleted file mode 100644 index ddcd25cb..00000000 --- a/old/server/app/templates/celery.html +++ /dev/null @@ -1,43 +0,0 @@ -{%- extends "base.html" %} - -{% import "bootstrap/utils.html" as utils %} - -{% block content %} -<style> - .progress { - width: 100%; - text-align: center; - } -</style> - -<div class="container"> - - <div class="row"> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1> - <h2 class="subtitle">Celery Test</h2> - </div> - - <div class="row"> - <div class="col-sm-12 align_center"> - <button id="submit-data">Submit Data</button><br><br> - <div id="progress"></div> - </div> - </div> - -</div> - -{% block footer %} -{{ super() }} -{% endblock %} - -{% endblock %} - - -{% block scripts %} - {{super()}} - -<script type="text/javascript" src="{{url_for('static', filename='js/old-js/nanobar.min.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/old-js/celery.js')}}"></script> - - -{% endblock scripts %}
\ No newline at end of file diff --git a/old/server/app/templates/display.html b/old/server/app/templates/display.html deleted file mode 100644 index f73a6ca5..00000000 --- a/old/server/app/templates/display.html +++ /dev/null @@ -1,69 +0,0 @@ -{%- extends "base.html" %} - -{% import "bootstrap/utils.html" as utils %} - -{% block content %} - -<style type="text/css"> - .display_im{ - width:100%; - max-width: 360px; - min-width: 256px; - margin-bottom: 10px; - } - .display_left{ - - } - .display_right{ - - } - .caption{ - font-size: 11px; - } -</style> -<div class="container"> - - <div class="row"> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1> - <h2 class="subtitle">Result</h2> - </div> - - <div class="row"> - <div style="height:50px"></div> - <div class="col-sm-12" style="text-align: center;"> - <img class="display_im" src="{{ url_for('main.get_image', imtype='renders', uuid_name='{}'.format( uuid_name )) }}" /> - <p class="caption">Rendered result</p> - </div> - </div> - - <div class="col-sm-12" style="text-align: center;"> - <img class="display_im" src="{{ url_for('main.get_image', imtype='uploads', uuid_name='{}'.format( uuid_name )) }}" /> - <p class="caption">Original image</p> - </div> - </div> - - <div class="col-sm-12" style="text-align: center;"> - <img class="display_im" src="{{ url_for('main.get_image', imtype='fcn', uuid_name='{}'.format( uuid_name )) }}" /> - <p class="caption">Semantic segmentation</p> - </div> - </div> - - </div> - - <div id="about_btn" class="row"> - <div class="col-sm-12"> - <div class="align_center"> - <a class="btn btn-sm btn-default" href="/" role="button">Home</a> - </div> - </div> - </div> - - </div> -</div> - -{% block footer %} -{{super()}} -{% endblock footer %} - -{% endblock %} - diff --git a/old/server/app/templates/index.html b/old/server/app/templates/index.html deleted file mode 100644 index f740bb5b..00000000 --- a/old/server/app/templates/index.html +++ /dev/null @@ -1,161 +0,0 @@ -<!doctype html> -<html> -<head> - <meta charset="utf-8"> - <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> - <link rel="shortcut icon" href="/static/img/favicon.ico" /> - <title>DullDream (v2 x ZkM)</title> - <link rel="stylesheet" type="text/css" href="{{url_for('static', filename='css/dullbrown-theme.css')}}"> -</head> -<body> - -<header> - <h1><a href="/"><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></a></h1> - <h2 class="subtitle">Neural network photo effect</h2> -</header> - -<div class="upload_view container"> - <div class="row"> - <div id="photo_area" class="dash_border"> - <input class="hidden_input" id="user_file" type="file" accept="image/*"> - <canvas class="photo" id="user_photo_canvas" width="512" height="512"></canvas> - <div class="center_inner"> - <label id="take_photo_btn" for="user_file" class="upload_center_btn"> - <div class='btn-lg btn'>Take Photo</div> - </label> - <div id="details"></div> - <div id="progress"></div> - </div> - - <div id="preloader_anim"> - <img src="/static/img/loader.gif"> - </div> - </div> - </div> - - <div id="upload_controls" class="row"> - <div class="align_center"> - <div id="restart_btn"> - <a id="restart_btn" class="btn btn-md btn-default" role="button">Change Image</a> - <input type='file' accept="image/*"> - </div> - <div id="dropdown_btn"> - <select id="dropdown"></select> - </div> - <div id="upload_btn"> - <a id="take_photo_btn" class="btn btn-md btn-important" role="button">Upload</a> - </div> - </div> - <div 
class="align_center consent_box"> - <label> - <input type="checkbox" id="agree" value="1" checked> - I consent to have my dulled image displayed at ZkM. - </label> - </div> - </div> - - <div id="about_btn" class="row"> - <div class="align_center"> - <a class="btn btn-sm btn-default about_button" role="button">About</a> - <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a> - <p class="notice"> - All images uploaded can be used for exhibition and review purposes. - </p> - <p class="notice"> - Currently this work is on view at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a>. View recent DullDreams <a href="/gallery">here</a>. - </p> - </div> - </div> -</div> - -<div class="about_view modal"> - <div class="inner"> - <header> - <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1> - </header> - <div class='content'> - <p> - <b><i>DullDream™ by DullTech™</i></b> is a series of experiments appropriating neural network image recognition technology to make visual representation less interesting. - </p> - <p> - Can machine learning help us desensitize? Our impactful lives are clogging up social media feeds with unique filter settings, leaving us nostalgic for a vanilla future. Can machine learning help us achieve this? Take the excitement out of our lives, prepare us for a time where we will all have to be the same, have the same values and culture? Painting a future where the Dull is no longer a dream but a nightmare? - </p> - <p> - DullDream™ (version 2) was developed for the OpenCodes exhibition at ZKM. It based on the original DullDream™ (<a href="http://dulldream.xyz">version 1</a>), developed for Transmediale 2017 - Ever Elusive by <a href="http://constantdullaart.com">Constant Dullaart</a> in collaboration with <a href="http://ahprojects.com">Adam Harvey</a>. DullDream (V2) has been generously made possible by support from ZKM. - </p> - </div> - <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center> - </div> -</div> - -<div class="privacy_view modal"> - <div class="inner"> - <header> - <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1> - </header> - <div class='content'> - <h3>Privacy Notice</h3> - <p> - Images uploaded to this site are being used for a public art display at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a> - </p> - <p> - If you would not like to be included, be sure to uncheck the permission box on the upload page. 
- </p> - - </div> - <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center> - </div> -</div> - - -<div class="result_view"> - <div class="final_result"> - </div> - - <div class="row made_with"> - Made with DullDream.xyz for ZKM OpenCodes 2017 - </div> - - <div class="row"> - <button class='btn' id="show_all_results">Detailed Analysis</button> - </div> - - <div class="all_results"> - </div> - - <div id="share_btns" class="row"> - <a id="permalink" href="#">Permalink</a> - </div> - - <div id="about_btn" class="row"> - <div class="align_center"> - <a href="/" class="btn btn-sm btn-default home_button" role="button">Home</a> - <a class="btn btn-sm btn-default about_button" role="button">About</a> - <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a> - </div> - - </div> - -</div> - -<div id="footer"> - DullDream™ by <a href="http://constantdullaart.com">Constant Dullaart</a>.<br> - <small>Made in collaboration with <a href="http://ahprojects.com">Adam Harvey</a></small> -</div> - -</body> -<script type="text/html" id="result_template"> - <div class="row"> - <img src="{img}"><br> - <b>{title}</b> - </div> -</script> -<script type="text/json" id="dropdown_options">{{ task_json }}</script> -<script type="text/javascript" src="{{url_for('static', filename='js/vendor/jquery-3.3.1.min.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/vendor/ExifReader.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/vendor/canvas-to-blob.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/vendor/prefixfree.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/util.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/upload.js')}}"></script> -<script type="text/javascript" src="{{url_for('static', filename='js/app.js')}}"></script> -</html>
\ No newline at end of file diff --git a/old/server/celery_worker.py b/old/server/celery_worker.py deleted file mode 100644 index 1545a884..00000000 --- a/old/server/celery_worker.py +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python -import os -from app.basemodels import celery -from app import create_app - -app = create_app(os.getenv('FLASK_CONFIG') or 'default') -app.app_context().push() diff --git a/old/server/config.py b/old/server/config.py deleted file mode 100644 index 5042efb6..00000000 --- a/old/server/config.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -"""Application configuration.""" -import os -from os.path import join -basedir = os.path.abspath(os.path.dirname(__file__)) - -class Config(object): - """Base configuration.""" - - #SECRET_KEY = os.environ.get('MYFLASKAPP_SECRET', 'secret-key') # TODO: Change me - APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory - PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) - #BCRYPT_LOG_ROUNDS = 13 - DEBUG_TB_ENABLED = False # Disable Debug toolbar - #DEBUG_TB_INTERCEPT_REDIRECTS = False - CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc. - HOST = '0.0.0.0' - FLASK_DEBUG_DISABLE_STRICT = True - #WTF_CSRF_SECRET_KEY = '94ksadkf49DKEDFJ.&' - BOOTSTRAP_GOOGLE_ANALYTICS_ACCOUNT = None - BOOTSTRAP_SERVE_LOCAL = True - SECRET_KEY = os.environ.get('SECRET_KEY') or '94ksadkf49DKEDFJ.&' - CELERY_BROKER_URL = 'redis://localhost:6379/0' - CELERY_RESULT_BACKEND = 'redis://localhost:6379/0' - - FLASKY_SLOW_DB_QUERY_TIME=0.5 - - @staticmethod - def init_app(app): - pass - - -class DevelopmentConfig(Config): - """Development configuration.""" - ENV = 'dev' - DEBUG = True - -class ProductionConfig(Config): - ENV = 'production' - DEBUG = False - # @classmethod - # def init_app(cls, app): - # Config.init_app(app) - # # import logging - # # app.logger.addHandler(mail_handler) - - -class DigitalOceanConfig(Config): - """Production configuration.""" - def init_app(cls, app): - ProductionConfig.init_app(app) - # log to syslog - import logging - from logging.handlers import SysLogHandler - syslog_handler = SysLogHandler() - syslog_handler.setLevel(logging.WARNING) - app.logger.addHandler(syslog_handler) - - -class UnixConfig(ProductionConfig): - @classmethod - def init_app(cls, app): - ProductionConfig.init_app(app) - - # log to syslog - import logging - from logging.handlers import SysLogHandler - syslog_handler = SysLogHandler() - syslog_handler.setLevel(logging.WARNING) - app.logger.addHandler(syslog_handler) - - -config = { - 'development': DevelopmentConfig, - 'production': ProductionConfig, - 'digitalocean': DigitalOceanConfig, - 'default': DevelopmentConfig -} diff --git a/old/server/deploy.sh b/old/server/deploy.sh deleted file mode 100755 index c2594cab..00000000 --- a/old/server/deploy.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -d_src="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/ -d_dst=/home/dull/dulldream/www/dulldream_xyz -ssh_alis=dulldream-root -#DOCKER_DIR_P1="$(dirname "$CWD")" - -echo "Syncing DullDream LOCAL to -> REMOTE" -echo $d_src -echo $d_dst - - -#rsync -a -e 'ssh' \ -rsync -r -v --progress -e 'ssh' \ - --delete \ - --exclude='.DS_Store' \ - --exclude='deploy.sh' \ - $d_src $ssh_alis:$d_dst - -echo "Synced :)"
\ No newline at end of file diff --git a/old/server/dulldream.wsgi.py b/old/server/dulldream.wsgi.py deleted file mode 100644 index ed992528..00000000 --- a/old/server/dulldream.wsgi.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/python -import sys -sys.path.insert(0, "/home/dulldream/dulldream/www/dulldream_xyz/") - -from app import create_app - -import logging -logging.basicConfig(stream=sys.stderr) -# logging.basicConfig(filename='error.log',level=logging.DEBUG) - -application = create_app('production') -application.secret_key = 'curlier6982!1decentralizationists' - diff --git a/old/server/run-celery.sh b/old/server/run-celery.sh deleted file mode 100755 index e38174fa..00000000 --- a/old/server/run-celery.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -celery worker -A celery_worker.celery --loglevel=info - diff --git a/old/server/run-dev.sh b/old/server/run-dev.sh deleted file mode 100755 index b4eb2a61..00000000 --- a/old/server/run-dev.sh +++ /dev/null @@ -1 +0,0 @@ -FLASK_CONFIG=development python run.py
\ No newline at end of file diff --git a/old/server/run-gunicorn.sh b/old/server/run-gunicorn.sh deleted file mode 100755 index 64debabd..00000000 --- a/old/server/run-gunicorn.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -gunicorn -w 1 -b 0.0.0.0:8000 run:app diff --git a/old/server/run-redis.sh b/old/server/run-redis.sh deleted file mode 100755 index e9ceb845..00000000 --- a/old/server/run-redis.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -/usr/local/bin/redis-server /etc/redis/redis.conf diff --git a/old/server/run.py b/old/server/run.py deleted file mode 100644 index ff2d5009..00000000 --- a/old/server/run.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/python3 -import os -from flask import Flask -from app import create_app - -app = create_app(os.getenv('FLASK_CONFIG') or 'default') -import logging -logging.basicConfig(filename='error.log',level=logging.DEBUG) - -if __name__ == '__main__': - app.run(host='0.0.0.0', debug=True, threaded=False, port=8000) - pass diff --git a/package-lock.json b/package-lock.json index 187e090a..a4d114fc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7163,6 +7163,66 @@ } } }, + "react-router": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-4.3.1.tgz", + "integrity": "sha512-yrvL8AogDh2X42Dt9iknk4wF4V8bWREPirFfS9gLU1huk6qK41sg7Z/1S81jjTrGHxa3B8R3J6xIkDAA6CVarg==", + "requires": { + "history": "^4.7.2", + "hoist-non-react-statics": "^2.5.0", + "invariant": "^2.2.4", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.1", + "warning": "^4.0.1" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "path-to-regexp": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz", + "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=", + "requires": { + "isarray": "0.0.1" + } + }, + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "react-router-dom": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-4.3.1.tgz", + "integrity": "sha512-c/MlywfxDdCp7EnB7YfPMOfMD3tOtIjrQlj/CKfNMBxdmpJP8xcz5P/UAFn3JbnQCNUxsHyVVqllF9LhgVyFCA==", + "requires": { + "history": "^4.7.2", + "invariant": "^2.2.4", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.1", + "react-router": "^4.3.1", + "warning": "^4.0.1" + }, + "dependencies": { + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, "react-spin": { "version": "0.6.2", "resolved": "https://registry.npmjs.org/react-spin/-/react-spin-0.6.2.tgz", diff --git a/package.json b/package.json index 6ff9dac5..0cc38ef6 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,8 @@ "build": "NODE_ENV=production webpack --config ./webpack.config.prod.js", "deploy": "NODE_ENV=production webpack --config ./webpack.config.prod.js && git commit -am 'deploy' && git push origin master && ssh vframe@vframe ./restart.sh", "watchSplash": "NODE_ENV=development webpack --config 
./webpack.splash.dev.js --colors --watch", - "buildSplash": "NODE_ENV=production webpack --config ./webpack.splash.prod.js" + "buildSplash": "NODE_ENV=production webpack --config ./webpack.splash.prod.js", + "watchGeocode": "NODE_ENV=development webpack --config ./webpack.geocode.dev.js --colors --watch" }, "repository": { "type": "git", @@ -48,6 +49,7 @@ "react-dom": "^16.3.0", "react-hot-loader": "^4.3.0", "react-redux": "^5.0.7", + "react-router-dom": "^4.3.1", "react-spin": "^0.6.2", "redux": "^4.0.0", "redux-thunk": "^2.3.0", diff --git a/scraper/.gitignore b/scraper/.gitignore new file mode 100644 index 00000000..868c3dd4 --- /dev/null +++ b/scraper/.gitignore @@ -0,0 +1,4 @@ +datasets/s2 +datasets/old +datasets/scholar_entries.numbers +datasets/scholar_entries.csv diff --git a/scraper/reports/datasets b/scraper/reports/datasets new file mode 120000 index 00000000..ed9c23bf --- /dev/null +++ b/scraper/reports/datasets @@ -0,0 +1 @@ +../../site/datasets/
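
[Editor's note on the build wiring above: package.json gains a watchGeocode script that points webpack at a new ./webpack.geocode.dev.js, react-router-dom@4.3.1 joins the client dependencies, and, in the next hunk, geocode_papers.html starts loading /reports/geocode-app.js. The webpack config file itself is not included in this part of the diff, so the following is only a minimal sketch of what such a dev config typically looks like, assuming a conventional babel-loader setup and a hypothetical client/geocode/index.js entry point; the real webpack.geocode.dev.js may differ.]

    // webpack.geocode.dev.js -- illustrative sketch only; the actual config is not shown in this diff.
    const path = require('path')

    module.exports = {
      // Hypothetical entry for the geocode app (client/geocode/ exists in this repo,
      // but the entry filename here is an assumption).
      entry: './client/geocode/index.js',
      output: {
        // Chosen to match the <script src="/reports/geocode-app.js"> tag added to
        // scraper/reports/geocode_papers.html in the following hunk.
        path: path.resolve(__dirname, 'scraper', 'reports'),
        filename: 'geocode-app.js',
        publicPath: '/reports/'
      },
      devtool: 'cheap-module-source-map',
      module: {
        rules: [
          {
            test: /\.js$/,
            exclude: /node_modules/,
            use: 'babel-loader'
          }
        ]
      }
    }

[With a config along these lines, `npm run watchGeocode` rebuilds the bundle on change and the geocode report page picks it up from /reports/geocode-app.js.]
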
\ No newline at end of file diff --git a/scraper/reports/geocode_papers.html b/scraper/reports/geocode_papers.html index 529ee9c7..84ffe356 100644 --- a/scraper/reports/geocode_papers.html +++ b/scraper/reports/geocode_papers.html @@ -33,5 +33,6 @@ html,body { margin: 0; padding: 0; width: 100%; height: 100%; } <div id="container"> </div> </body> +<script src="/reports/geocode-app.js"></script> </html> diff --git a/scraper/reports/paper_title_report.html b/scraper/reports/paper_title_report.html index 51d5204e..90deaf36 100644 --- a/scraper/reports/paper_title_report.html +++ b/scraper/reports/paper_title_report.html @@ -1,9 +1,3 @@ -<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose 
Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the 
Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D 
face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained 
adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Queensland</td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question 
answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across 
Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 
Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a 
href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. 
figshare&sort=relevance" target="_blank">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a 
href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression 
recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><a href="http://doi.acm.org/10.1145/2676440.2676443" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a 
href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td>Tohoku University</td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear 
Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems: -The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: -the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3"
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial 
Expressions with Gabor Wavelets</td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td>Kyushu University</td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by - Combining Multiple Descriptors and Learned - Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by - combining multiple descriptors and learned - background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>lfw_p</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a 
href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Max Planck Institute for Informatics</td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial Expression Database</td><td>The MUG facial expression database</td><td><a href="http://ieeexplore.ieee.org/document/5617662/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of 
Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>penn_fudan</td><td>Penn 
Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classication</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Technology Sydney</td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a 
href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><a href="https://arxiv.org/pdf/1511.02459.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td>South China University of Technology</td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><a 
href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td>Michigan State University</td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: +<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI 
dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>ilids_mcts</td><td>i-LIDS</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Queensland</td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td>Tohoku University</td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a 
New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td>University of Colorado at Colorado Springs</td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: 
Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Technology Sydney</td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="http://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a 
href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Max Planck Institute for Informatics</td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="https://doi.org/10.1007/s11263-009-0275-4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a 
href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? 
a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina Wilmington</td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>lfw_p</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td><a 
href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sun_Deep_Learning_Face_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>177bc509dd0c7b8d388bb47403f28d6228c14b5c</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><a href="http://doi.acm.org/10.1145/2676440.2676443" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td>Michigan State University</td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a 
href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance 
Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587481" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a 
href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><a 
href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Genealogical face recognition based on UB KinFace database</td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY 
Buffalo</td><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td><a href="https://doi.org/10.1109/TIFS.2014.2361479" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Wilmington</td><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining 
Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td>Johns Hopkins University</td><td>377f2b65e6a9300448bdccf678cde59449ecd337</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric 
learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td>Kyushu University</td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose 
estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><a href="https://arxiv.org/pdf/1511.06523.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="http://pdfs.semanticscholar.org/71b7/178df5d2b112d07e45038cb5637208659ff7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset</td><td><a href="http://pdfs.semanticscholar.org/5ffd/74d2873b7cba2cbc5fd295cc7fbdedca22a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models 
for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute 
Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_motionpairs</td><td>TUD-MotionPairs</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound 
query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database: -discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a 
href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and 
People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td>University of Colorado at Colorado Springs</td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Central Florida</td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset 
and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td>Johns Hopkins University</td><td>377f2b65e6a9300448bdccf678cde59449ecd337</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and 
Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a 
href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><a href="https://arxiv.org/pdf/1511.06523.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>wlfdb</td><td></td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: 
Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia 
University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html>
\ No newline at end of file +discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong 
Kong</td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><a href="https://doi.org/10.1016/j.imavis.2009.11.005" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion 
and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>9a9877791945c6fa4c1743ec6d3fb32570ef8481</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world 
person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Central Florida</td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Xm2vtsdb: the Extended M2vts Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty 
Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><a href="https://arxiv.org/pdf/1511.02459.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td>South China University of Technology</td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a3bc3e5e9753769163cb30b16dbd12e266b93e</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong 
Kong</td><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://doi.org/10.1016/j.imavis.2016.01.002" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>gfw</td><td>YouTube Pose</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? 
learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial 
Expression Database</td><td>The MUG facial expression database</td><td><a href="http://ieeexplore.ieee.org/document/5617662/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/reports/paper_title_report_no_location.html b/scraper/reports/paper_title_report_no_location.html
index 33de04e4..66e93e87 100644
--- a/scraper/reports/paper_title_report_no_location.html
+++ b/scraper/reports/paper_title_report_no_location.html
@@ -1,9 +1,3 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance"
target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose 
Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a 
href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and 
Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of 
Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s 
Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a 
href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition 
grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? 
a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and 
recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems: -The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: -the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390"
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in 
autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person 
search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by - Combining Multiple Descriptors and Learned - Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by - combining multiple descriptors and learned - background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A 
Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for 
Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures 
and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating 
Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a 
href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far 
Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the 
Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces 
Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded 
Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a 
href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: +<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>ilids_mcts</td><td>i-LIDS</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET 
Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="http://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) 
Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="https://doi.org/10.1007/s11263-009-0275-4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene 
analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT 
Metrics</td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587481" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep 
networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for 
non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching 
Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric 
learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in 
Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait 
Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster 
Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="http://pdfs.semanticscholar.org/71b7/178df5d2b112d07e45038cb5637208659ff7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset</td><td><a href="http://pdfs.semanticscholar.org/5ffd/74d2873b7cba2cbc5fd295cc7fbdedca22a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic 
Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial 
Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database: -discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time 
surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition 
Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>wlfdb</td><td></td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html>
\ No newline at end of file +discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><a href="https://doi.org/10.1016/j.imavis.2009.11.005" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: 
A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9a9877791945c6fa4c1743ec6d3fb32570ef8481</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world 
person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Xm2vtsdb: the Extended M2vts Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World 
Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a3bc3e5e9753769163cb30b16dbd12e266b93e</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation 
procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://doi.org/10.1016/j.imavis.2016.01.002" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>gfw</td><td>YouTube Pose</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? 
learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr></table></body></html>
\ No newline at end of file diff --git a/scraper/reports/paper_title_report_nonmatching.html b/scraper/reports/paper_title_report_nonmatching.html index a59cf813..d24cec59 100644 --- a/scraper/reports/paper_title_report_nonmatching.html +++ b/scraper/reports/paper_title_report_nonmatching.html @@ -1,9 +1,3 @@ -<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person 
Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. 
figshare&sort=relevance" target="_blank">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on 
High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection 
Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems: -The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: -the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the 
karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by - Combining Multiple Descriptors and Learned - Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by - combining multiple descriptors and learned - background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: 
HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: 
winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classication</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: +<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>ilids_mcts</td><td>i-LIDS</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming 
data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a 
href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A 
Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database: -discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr></table></body></html>
\ No newline at end of file +discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><a href="https://doi.org/10.1016/j.imavis.2009.11.005" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/s2-final-report.py b/scraper/s2-final-report.py
index 489e43df..58ac481f 100644
--- a/scraper/s2-final-report.py
+++ b/scraper/s2-final-report.py
@@ -9,6 +9,7 @@ import subprocess
 from util import *
 DIR_PUBLIC_CITATIONS = "../site/datasets/final"
+DIR_UNKNOWN_CITATIONS = "../site/datasets/unknown"
 @click.command()
 def s2_final_report():
@@ -23,15 +24,16 @@ def s2_final_report():
 def process_paper(row, addresses):
   aggregate_citations = {}
+  unknown_citations = {}
   address = None
   papers = []
   print(row['paper_ids'])
   for paper_id in row['paper_ids']:
-    res = process_single_paper(row, paper_id, addresses, aggregate_citations)
+    res = process_single_paper(row, paper_id, addresses, aggregate_citations, unknown_citations)
     if res:
       papers.append(res)
-      if res['address']:
-        address = res['address']
+    if res['address']:
+      address = res['address']
   if not len(papers):
     return
   with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, row['key']), 'w') as f:
@@ -42,8 +44,14 @@ def process_paper(row, addresses):
       'additional_papers': papers[1:],
       'citations': [aggregate_citations[key] for key in aggregate_citations.keys()],
     }, f)
+  with open('{}/{}.json'.format(DIR_UNKNOWN_CITATIONS, row['key']), 'w') as f:
+    json.dump({
+      'id': papers[0]['paper_id'],
+      'paper': papers[0],
+      'citations': [unknown_citations[key] for key in unknown_citations.keys()],
+    }, f)
-def process_single_paper(row, paper_id, addresses, aggregate_citations):
+def process_single_paper(row, paper_id, addresses, aggregate_citations, unknown_citations):
   res = {
     'paper_id': '',
     'key': '',
@@ -60,13 +68,6 @@
     # 'citations_doi': 0,
   }
-  geocoded_citations = []
-  unknown_citations = []
-  empty_citations = []
-  pdf_count = 0
-  doi_count = 0
-  address_count = 0
-
   fn = file_path('papers', paper_id, 'paper.json')
   with open(fn, 'r') as f:
@@ -103,14 +104,16 @@
     citationId = cite['paperId']
     if citationId in aggregate_citations:
       continue
+    elif citationId in unknown_citations:
+      continue
     seen_here = {}
     citation = load_paper(citationId)
     has_pdf = os.path.exists(file_path('pdf', citationId, 'paper.txt'))
     has_doi = os.path.exists(file_path('doi', citationId, 'paper.doi'))
-    if has_pdf:
-      pdf_count += 1
-    if has_doi:
-      doi_count += 1
+    # if has_pdf:
+    #   pdf_count += 1
+    # if has_doi:
+    #   doi_count += 1
     if citation is None or citation.data is None:
       print("Citation missing! {}".format(cite['paperId']))
       continue
@@ -120,7 +123,7 @@
     institution = ''
     address = None
     for inst in sorted(institutions, key=operator.itemgetter(1)):
-      address_count += 1
+      # address_count += 1
       institution = inst[1]
       next_address = addresses.findObject(institution)
       if next_address and next_address['address'] not in seen_here:
@@ -142,21 +145,20 @@
         address = next_address
         geocoded_addresses.append(next_address)
     if address:
-      if citationId not in aggregate_citations:
-        aggregate_citations[citationId] = {
-          'id': citationId,
-          'title': citation.title,
-          'addresses': geocoded_addresses,
-          'year': citation.year,
-          'pdf': citation.pdf_link,
-        }
-
-    # res['citation_count'] = len(data['citations'])
-    # res['citations_geocoded'] = len(geocoded_citations)
-    # res['citations_unknown'] = len(unknown_citations)
-    # res['citations_empty'] = len(empty_citations)
-    # res['citations_pdf'] = pdf_count
-    # res['citations_doi'] = doi_count
+      aggregate_citations[citationId] = {
+        'id': citationId,
+        'title': citation.title,
+        'addresses': geocoded_addresses,
+        'year': citation.year,
+        'pdf': citation.pdf_link,
+      }
+    else:
+      unknown_citations[citationId] = {
+        'id': citationId,
+        'title': citation.title,
+        'year': citation.year,
+        'pdf': citation.pdf_link,
+      }
   return res

 def load_ft_lookup():
@@ -179,6 +181,8 @@
     rec = {}
     for index, key in enumerate(keys):
       rec[key] = row[index]
+    if rec['paper_id'] == "":
+      continue
     paper_key = rec['key']
     if paper_key not in lookup:
       rec['paper_ids'] = []
diff --git a/scraper/s2-geocode-server.py b/scraper/s2-geocode-server.py
new file mode 100644
index 00000000..0b1b0937
--- /dev/null
+++ b/scraper/s2-geocode-server.py
@@ -0,0 +1,68 @@
+#!python
+
+import os
+import sys
+import json
+import time
+import argparse
+from datetime import datetime
+from flask import Flask, request, render_template, jsonify
+
+from dotenv import load_dotenv
+load_dotenv()
+
+from util import *
+
+locations_worksheet = fetch_worksheet('paper_locations')
+
+app = Flask(__name__, static_url_path="/reports", static_folder=os.path.abspath("reports"))
+
+# static api route
+@app.route('/', methods=['GET'])
+def index():
+  return app.send_static_file('geocode_papers.html')
+
+@app.errorhandler(404)
+def page_not_found(e):
+  return app.send_static_file('geocode_papers.html')
+
+# route to get all the manually geocoded IDs (to dedupe)
+# route to add a geocoding for a paper
+
+@app.route('/api/institutions', methods=['GET'])
+def list_locations():
+  addresses = AddressBook()
+  return jsonify({
+    'entities': addresses.entities,
+    'lookup': addresses.lookup,
+  })
+
+@app.route('/api/papers', methods=['GET'])
+def list_papers():
+  lookup_keys, lines = fetch_google_sheet('citation_lookup')
+  paper_lookup = {}
+  for line in lines:
+    paper_lookup[line[0]] = line
+  return jsonify({
+    'papers': paper_lookup,
+  })
+
+@app.route('/api/address', methods=['POST'])
+def add_address():
+  # id, title, institution_1, institution_2, institution_3, institution_4, notes
+  locations_worksheet.insert_row([
+    request.form['paper_id'],
+    request.form['title'],
+    request.form['institution_1'],
+    request.form['institution_2'],
+    request.form['institution_3'],
+    request.form['institution_4'],
+    request.form['notes'],
+  ])
+  return jsonify({
+    'status': 'ok'
+  })
+
+if __name__=="__main__":
+  app.run("0.0.0.0", debug=False)
+
diff --git a/scraper/s2-papers.py b/scraper/s2-papers.py
index 9a584e29..744454b7 100644
--- a/scraper/s2-papers.py
+++ b/scraper/s2-papers.py
@@ -23,8 +23,16 @@ def fetch_papers():
   no_location_rows = []
   nonmatching_rows = []
   for line in lines:
-    key, name, title, paper_id, is_unknown = line
+    # key, name, title, paper_id, is_unknown, notes = line
+    key = line[0]
+    name = line[1]
+    title = line[2]
+    paper_id = line[3]
+    if paper_id == '':
+      continue
     paper = fetch_paper(s2, paper_id)
+    if paper is None:
+      continue
     db_paper = load_paper(paper_id)
     pdf_link = db_paper.pdf_link if db_paper else ""
diff --git a/scraper/util.py b/scraper/util.py
index 2d7c2ccb..9b47510a 100644
--- a/scraper/util.py
+++ b/scraper/util.py
@@ -331,11 +331,11 @@ def fetch_paper(s2, paper_id):
   print(paper_id)
   paper = s2.paper(paper_id)
   if paper is None:
-    print("Got none paper??")
+    print("Paper not found: {}".format(paper_id))
     # time.sleep(random.randint(1, 2))
     paper = s2.paper(paper_id)
     if paper is None:
-      print("Paper not found")
+      # print("Paper not found")
       return None
   write_json(paper_fn, paper)
   # time.sleep(random.randint(1, 2))
@@ -343,7 +343,8 @@ def fetch_spreadsheet():
   scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
-  credentials = ServiceAccountCredentials.from_json_keyfile_name('./.creds/Megapixels-ef28f91112a9.json', scope)
+  path = os.path.dirname(os.path.abspath(__file__))
+  credentials = ServiceAccountCredentials.from_json_keyfile_name(os.path.join(path, '.creds/Megapixels-ef28f91112a9.json'), scope)
   docid = "1denb7TjYsN9igHyvYah7fQ0daABW32Z30lwV7QrDJQc"
   client = gspread.authorize(credentials)
   spreadsheet = client.open_by_key(docid)
diff --git a/site/datasets/final/adience.csv b/site/datasets/final/adience.csv
new file mode 100644
index 00000000..9c9f2b76
--- /dev/null
+++ b/site/datasets/final/adience.csv
@@ -0,0 +1,102 @@
+index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,Adience,adience,0.0,0.0,,,1be498d4bbc30c3bfd0029114c784bc2114d67c0,main,http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf,Age and Gender Estimation of Unfiltered Faces,2014
+1,Adience,adience,28.5456282,77.2731505,"IIIT Delhi, India",edu,f726738954e7055bb3615fa7e8f59f136d3e0bdc,citation,https://arxiv.org/pdf/1803.07385.pdf,Are you eligible? 
Predicting adulthood from face images via class specific mean autoencoder,2018 +2,Adience,adience,37.43131385,-122.16936535,Stanford University,edu,16d6737b50f969247339a6860da2109a8664198a,citation,https://pdfs.semanticscholar.org/16d6/737b50f969247339a6860da2109a8664198a.pdf,Convolutional Neural Networks for Age and Gender Classification,2016 +3,Adience,adience,40.00229045,116.32098908,Tsinghua University,edu,2149d49c84a83848d6051867290d9c8bfcef0edb,citation,https://doi.org/10.1109/TIFS.2017.2746062,Label-Sensitive Deep Metric Learning for Facial Age Estimation,2018 +4,Adience,adience,51.5217668,-0.13019072,University of London,edu,31ea88f29e7f01a9801648d808f90862e066f9ea,citation,https://arxiv.org/pdf/1605.06391.pdf,Deep Multi-task Representation Learning: A Tensor Factorisation Approach,2016 +5,Adience,adience,40.0044795,116.370238,Chinese Academy of Sciences,edu,d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,citation,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,0 +6,Adience,adience,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,citation,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,0 +7,Adience,adience,34.0224149,-118.28634407,University of Southern California,edu,29f298dd5f806c99951cb434834bc8dcc765df18,citation,https://doi.org/10.1109/ICPR.2016.7899837,Computationally efficient template-based face recognition,2016 +8,Adience,adience,45.5039761,-73.5749687,McGill University,edu,ed9d11e995baeec17c5d2847ec1a8d5449254525,citation,https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf,Efficient Gender Classification Using a Deep LDA-Pruned Net,2017 +9,Adience,adience,12.9551259,77.5741985,Bangalore Institute of Technology,edu,10126b467391e153d36f1a496ef5618097775ad1,citation,https://pdfs.semanticscholar.org/1012/6b467391e153d36f1a496ef5618097775ad1.pdf,An Active Age Estimation of Facial image using Anthropometric Model and Fast ICA,2017 +10,Adience,adience,42.36782045,-71.12666653,Harvard University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +11,Adience,adience,40.9153196,-73.1270626,Stony Brook University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +12,Adience,adience,38.8760446,115.4973873,North China Electric Power University,edu,56f86bef26209c85f2ef66ec23b6803d12ca6cd6,citation,http://arxiv.org/abs/1710.00307,Pyramidal RoR for image classification,2017 +13,Adience,adience,40.00229045,116.32098908,Tsinghua University,edu,51f626540860ad75b68206025a45466a6d087aa6,citation,https://doi.org/10.1109/ICIP.2017.8296595,Cluster convolutional neural networks for facial age estimation,2017 +14,Adience,adience,45.5039761,-73.5749687,McGill University,edu,407bb798ab153bf6156ba2956f8cf93256b6910a,citation,http://pdfs.semanticscholar.org/407b/b798ab153bf6156ba2956f8cf93256b6910a.pdf,Fisher Pruning of Deep Nets for Facial Trait Classification,2018 +15,Adience,adience,39.2899685,-76.62196103,University of 
Maryland,edu,81fc86e86980a32c47410f0ba7b17665048141ec,citation,http://pdfs.semanticscholar.org/81fc/86e86980a32c47410f0ba7b17665048141ec.pdf,Segment-based Methods for Facial Attribute Detection from Partial Faces,2018 +16,Adience,adience,22.304572,114.17976285,Hong Kong Polytechnic University,edu,dc2f16f967eac710cb9b7553093e9c977e5b761d,citation,https://doi.org/10.1109/ICPR.2016.7900141,Learning a lightweight deep convolutional network for joint age and gender recognition,2016 +17,Adience,adience,23.09461185,113.28788994,Sun Yat-Sen University,edu,dc2f16f967eac710cb9b7553093e9c977e5b761d,citation,https://doi.org/10.1109/ICPR.2016.7900141,Learning a lightweight deep convolutional network for joint age and gender recognition,2016 +18,Adience,adience,39.65404635,-79.96475355,West Virginia University,edu,7a65fc9e78eff3ab6062707deaadde024d2fad40,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf,A Study on Apparent Age Estimation,2015 +19,Adience,adience,42.357757,-83.06286711,Wayne State University,edu,28d99dc2d673d62118658f8375b414e5192eac6f,citation,http://www.cs.wayne.edu/~mdong/cvpr17.pdf,Using Ranking-CNN for Age Estimation,2017 +20,Adience,adience,25.0410728,121.6147562,Institute of Information Science,edu,0951f42abbf649bb564a21d4ff5dddf9a5ea54d9,citation,https://arxiv.org/pdf/1806.02023.pdf,Joint Estimation of Age and Gender from Unconstrained Face Images Using Lightweight Multi-Task CNN for Mobile Applications,2018 +21,Adience,adience,34.0224149,-118.28634407,University of Southern California,edu,eb6ee56e085ebf473da990d032a4249437a3e462,citation,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf,Age/gender classification with whole-component convolutional neural networks (WC-CNN),2017 +22,Adience,adience,32.77824165,34.99565673,Open University of Israel,edu,0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,citation,https://arxiv.org/pdf/1708.07517.pdf,FacePoseNet: Making a Case for Landmark-Free Face Alignment,2017 +23,Adience,adience,40.51865195,-74.44099801,State University of New Jersey,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +24,Adience,adience,39.2899685,-76.62196103,University of Maryland,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +25,Adience,adience,32.8536333,-117.2035286,Kyung Hee University,edu,9d4692e243e25eb465a0480376beb60a5d2f0f13,citation,https://doi.org/10.1109/ICCE.2016.7430617,Positional Ternary Pattern (PTP): An edge based image descriptor for human age recognition,2016 +26,Adience,adience,1.340216,103.965089,Singapore University of Technology and Design,edu,00823e6c0b6f1cf22897b8d0b2596743723ec51c,citation,https://arxiv.org/pdf/1708.07689.pdf,Understanding and Comparing Deep Neural Networks for Age and Gender Classification,2017 +27,Adience,adience,45.47567215,9.23336232,Università degli Studi di Milano,edu,a713a01971e73d0c3118d0409dc7699a24f521d6,citation,https://doi.org/10.1109/SSCI.2017.8285381,Age estimation based on face images and pre-trained convolutional neural networks,2017 +28,Adience,adience,37.2830003,127.04548469,Ajou 
University,edu,c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,Age and gender estimation using deep residual learning network,2018 +29,Adience,adience,37.403917,127.159786,Korea Electronics Technology Institute,edu,c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,Age and gender estimation using deep residual learning network,2018 +30,Adience,adience,37.26728,126.9841151,Seoul National University,edu,c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,Age and gender estimation using deep residual learning network,2018 +31,Adience,adience,49.2767454,-122.91777375,Simon Fraser University,edu,975978ee6a32383d6f4f026b944099e7739e5890,citation,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf,Privacy-Preserving Age Estimation for Content Rating,2018 +32,Adience,adience,49.8091536,-97.13304179,University of Manitoba,edu,975978ee6a32383d6f4f026b944099e7739e5890,citation,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf,Privacy-Preserving Age Estimation for Content Rating,2018 +33,Adience,adience,33.7774349,-84.3973208,"College of Computing, Georgia Tech",edu,58df849378fbcfb6b1a8ebddfbe4caa450226b9d,citation,https://doi.org/10.1109/ICIP.2017.8296770,Head pose estimation using learned discretization,2017 +34,Adience,adience,39.95472495,-75.15346905,Temple University,edu,58df849378fbcfb6b1a8ebddfbe4caa450226b9d,citation,https://doi.org/10.1109/ICIP.2017.8296770,Head pose estimation using learned discretization,2017 +35,Adience,adience,36.1017956,-79.501733,Elon University,edu,58df849378fbcfb6b1a8ebddfbe4caa450226b9d,citation,https://doi.org/10.1109/ICIP.2017.8296770,Head pose estimation using learned discretization,2017 +36,Adience,adience,23.7289899,90.3982682,Institute of Information Technology,edu,2e58ec57d71b2b2a3e71086234dd7037559cc17e,citation,https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf,A Gender Recognition System from Facial Image,2018 +37,Adience,adience,23.7316957,90.3965275,University of Dhaka,edu,2e58ec57d71b2b2a3e71086234dd7037559cc17e,citation,https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf,A Gender Recognition System from Facial Image,2018 +38,Adience,adience,37.98782705,23.73179733,National Technical University of Athens,edu,bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4,citation,http://pdfs.semanticscholar.org/bd57/2e9cbec095bcf5700cb7cd73d1cdc2fe02f4.pdf,Deep Learning for Computer Vision: A Brief Review,2018 +39,Adience,adience,47.00646895,-120.5367304,Central Washington University,edu,56c2fb2438f32529aec604e6fc3b06a595ddbfcc,citation,http://pdfs.semanticscholar.org/56c2/fb2438f32529aec604e6fc3b06a595ddbfcc.pdf,Comparison of Recent Machine Learning Techniques for Gender Recognition from Facial Images,2016 +40,Adience,adience,32.77824165,34.99565673,Open University of Israel,edu,c75e6ce54caf17b2780b4b53f8d29086b391e839,citation,https://arxiv.org/pdf/1802.00542.pdf,"ExpNet: Landmark-Free, Deep, 3D Facial Expressions",2018 +41,Adience,adience,31.83907195,117.26420748,University of Science and Technology of China,edu,47cd161546c59ab1e05f8841b82e985f72e5ddcb,citation,https://doi.org/10.1109/ICIP.2017.8296552,Gender classification in live videos,2017 +42,Adience,adience,25.0410728,121.6147562,Institute of Information 
Science,edu,1862f2df2e278505c9ca970f9c5a25ea3aeb9686,citation,https://pdfs.semanticscholar.org/1862/f2df2e278505c9ca970f9c5a25ea3aeb9686.pdf,Merging Deep Neural Networks for Mobile Devices,0 +43,Adience,adience,45.42580475,-75.68740118,University of Ottawa,edu,16820ccfb626dcdc893cc7735784aed9f63cbb70,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf,Real-time embedded age and gender classification in unconstrained video,2015 +44,Adience,adience,37.26728,126.9841151,Seoul National University,edu,282503fa0285240ef42b5b4c74ae0590fe169211,citation,http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf,Feeding Hand-Crafted Features for Enhancing the Performance of Convolutional Neural Networks,2018 +45,Adience,adience,32.8536333,-117.2035286,Kyung Hee University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +46,Adience,adience,24.7246403,46.62335012,King Saud University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +47,Adience,adience,23.7289899,90.3982682,Institute of Information Technology,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +48,Adience,adience,53.21967825,6.56251482,University of Groningen,edu,361c9ba853c7d69058ddc0f32cdbe94fbc2166d5,citation,http://pdfs.semanticscholar.org/361c/9ba853c7d69058ddc0f32cdbe94fbc2166d5.pdf,Deep Reinforcement Learning of Video Games,2017 +49,Adience,adience,41.1664858,-73.1920564,University of Bridgeport,edu,ac9a331327cceda4e23f9873f387c9fd161fad76,citation,http://pdfs.semanticscholar.org/ac9a/331327cceda4e23f9873f387c9fd161fad76.pdf,Deep Convolutional Neural Network for Age Estimation based on VGG-Face Model,2017 +50,Adience,adience,53.21967825,6.56251482,University of Groningen,edu,4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,citation,https://doi.org/10.1109/SSCI.2015.37,Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition,2015 +51,Adience,adience,40.4319722,-86.92389368,Purdue University,edu,6193c833ad25ac27abbde1a31c1cabe56ce1515b,citation,https://pdfs.semanticscholar.org/5f25/7ca18a92c3595db3bda3224927ec494003a5.pdf,Trojaning Attack on Neural Networks,2018 +52,Adience,adience,40.4319722,-86.92389368,Purdue University,edu,b18858ad6ec88d8b443dffd3e944e653178bc28b,citation,http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf,Trojaning Attack on Neural Networks,2017 +53,Adience,adience,40.9153196,-73.1270626,Stony Brook University,edu,25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,citation,http://pdfs.semanticscholar.org/25bf/288b2d896f3c9dab7e7c3e9f9302e7d6806b.pdf,Neural Networks with Smooth Adaptive Activation Functions for Regression,2016 +54,Adience,adience,40.9153196,-73.1270626,Stony Brook University,edu,1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc,citation,https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf,Squared Earth Mover ’ s Distance Loss for Training Deep Neural Networks on Ordered-Classes,2017 +55,Adience,adience,40.9153196,-73.1270626,Stony Brook 
University,edu,14e9158daf17985ccbb15c9cd31cf457e5551990,citation,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,ConvNets with Smooth Adaptive Activation Functions for Regression,2017 +56,Adience,adience,40.90826665,-73.11520891,Stony Brook University Hospital,edu,14e9158daf17985ccbb15c9cd31cf457e5551990,citation,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,ConvNets with Smooth Adaptive Activation Functions for Regression,2017 +57,Adience,adience,45.5039761,-73.5749687,McGill University,edu,13719bbb4bb8bbe0cbcdad009243a926d93be433,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Tian_Deep_LDA-Pruned_Nets_CVPR_2017_paper.pdf,Deep LDA-Pruned Nets for Efficient Facial Gender Classification,2017 +58,Adience,adience,41.10427915,29.02231159,Istanbul Technical University,edu,fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,citation,https://arxiv.org/pdf/1606.02909.pdf,Apparent Age Estimation Using Ensemble of Deep Learning Models,2016 +59,Adience,adience,47.3804685,8.5430355,"Disney Research, Zurich",edu,017e94ad51c9be864b98c9b75582753ce6ee134f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240,Rapid one-shot acquisition of dynamic VR avatars,2017 +60,Adience,adience,34.1579742,-118.2894729,"Disney Research, UK",company,017e94ad51c9be864b98c9b75582753ce6ee134f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240,Rapid one-shot acquisition of dynamic VR avatars,2017 +61,Adience,adience,34.1619174,-118.2883702,Walt Disney Imagineering,company,017e94ad51c9be864b98c9b75582753ce6ee134f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240,Rapid one-shot acquisition of dynamic VR avatars,2017 +62,Adience,adience,49.2593879,-122.9151893,"AltumView Systems Inc., Burnaby, BC, Canada",company,b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,citation,https://doi.org/10.1109/GlobalSIP.2017.8309138,Lapped convolutional neural networks for embedded systems,2017 +63,Adience,adience,37.2830003,127.04548469,Ajou University,edu,24286ef164f0e12c3e9590ec7f636871ba253026,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369721,Age and gender classification using wide convolutional neural network and Gabor filter,2018 +64,Adience,adience,37.26728,126.9841151,Seoul National University,edu,24286ef164f0e12c3e9590ec7f636871ba253026,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369721,Age and gender classification using wide convolutional neural network and Gabor filter,2018 +65,Adience,adience,47.6543238,-122.30800894,University of Washington,edu,96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale,2016 +66,Adience,adience,65.0592157,25.46632601,University of Oulu,edu,1fe121925668743762ce9f6e157081e087171f4c,citation,https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf,Unsupervised learning of overcomplete face descriptors,2015 +67,Adience,adience,23.0886214,-82.4481944,"Advanced Technologies Application Center, Havana, Cuba",edu,c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,citation,https://doi.org/10.1109/BTAS.2017.8272773,Age and gender classification using local appearance descriptors from facial components,2017 +68,Adience,adience,40.7240176,8.5578947,University of 
Sassari,edu,c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,citation,https://doi.org/10.1109/BTAS.2017.8272773,Age and gender classification using local appearance descriptors from facial components,2017 +69,Adience,adience,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +70,Adience,adience,37.2520226,127.0555019,"Samsung SAIT, Korea",company,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +71,Adience,adience,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d80a3d1f3a438e02a6685e66ee908446766fefa9,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +72,Adience,adience,42.357757,-83.06286711,Wayne State University,edu,4f1249369127cc2e2894f6b2f1052d399794919a,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663,Deep Age Estimation: From Classification to Ranking,2018 +73,Adience,adience,35.2742655,137.01327841,Chubu University,edu,5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c,citation,http://pdfs.semanticscholar.org/5fb5/d9389e2a2a4302c81bcfc068a4c8d4efe70c.pdf,Multiple Facial Attributes Estimation Based on Weighted Heterogeneous Learning,2016 +74,Adience,adience,1.3484104,103.68297965,Nanyang Technological University,edu,d0471d5907d6557cf081edf4c7c2296c3c221a38,citation,https://pdfs.semanticscholar.org/d047/1d5907d6557cf081edf4c7c2296c3c221a38.pdf,A Constrained Deep Neural Network for Ordinal Regression,0 +75,Adience,adience,41.3868913,2.16352385,University of Barcelona,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +76,Adience,adience,45.4312742,12.3265377,University of Venezia,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +77,Adience,adience,40.47913175,-74.43168868,Rutgers University,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +78,Adience,adience,39.2899685,-76.62196103,University of Maryland,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +79,Adience,adience,32.77824165,34.99565673,Open University of Israel,edu,62e913431bcef5983955e9ca160b91bb19d9de42,citation,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf,Facial Landmark Detection with Tweaked Convolutional Neural Networks,2015 +80,Adience,adience,34.67567405,33.04577648,Cyprus University of Technology,edu,9f3c9e41f46df9c94d714b1f080dafad6b4de1de,citation,https://doi.org/10.1109/ICT.2017.7998260,On the detection of images containing child-pornographic material,2017 +81,Adience,adience,51.49887085,-0.17560797,Imperial College 
London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +82,Adience,adience,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +83,Adience,adience,37.5600406,126.9369248,Yonsei University,edu,fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f,citation,https://arxiv.org/pdf/1809.01990.pdf,Multi-Expert Gender Classification on Age Group by Integrating Deep Neural Networks,2018 +84,Adience,adience,23.143197,113.34009651,South China Normal University,edu,dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,citation,http://doi.org/10.1007/s11042-017-4646-5,Age classification with deep learning face representation,2017 +85,Adience,adience,23.0502042,113.39880323,South China University of Technology,edu,dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,citation,http://doi.org/10.1007/s11042-017-4646-5,Age classification with deep learning face representation,2017 +86,Adience,adience,34.2152538,117.1398541,China University of Mining and Technology,edu,bc6a7390135bf127b93b90a21b1fdebbfb56ad30,citation,https://doi.org/10.1109/TIFS.2017.2766039,Bimodal Vein Data Mining via Cross-Selected-Domain Knowledge Transfer,2018 +87,Adience,adience,31.2284923,121.40211389,East China Normal University,edu,bc6a7390135bf127b93b90a21b1fdebbfb56ad30,citation,https://doi.org/10.1109/TIFS.2017.2766039,Bimodal Vein Data Mining via Cross-Selected-Domain Knowledge Transfer,2018 +88,Adience,adience,25.0410728,121.6147562,Institute of Information Science,edu,337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958,citation,https://arxiv.org/pdf/1810.11160.pdf,Data-specific Adaptive Threshold for Face Recognition and Authentication,2018 +89,Adience,adience,28.3656193,75.5834953,"Central Electronics Research Institute, Pilani, India",edu,1aeef2ab062c27e0dbba481047e818d4c471ca57,citation,https://doi.org/10.1109/ICACCI.2015.7275860,Analyzing impact of image scaling algorithms on viola-jones face detection framework,2015 +90,Adience,adience,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,2911e7f0fb6803851b0eddf8067a6fc06e8eadd6,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Jung_Joint_Fine-Tuning_in_ICCV_2015_paper.pdf,Joint Fine-Tuning in Deep Neural Networks for Facial Expression Recognition,2015 +91,Adience,adience,23.7317915,90.3805625,Dhaka University,edu,026e4ee480475e63ae68570d73388f8dfd4b4cde,citation,http://pdfs.semanticscholar.org/026e/4ee480475e63ae68570d73388f8dfd4b4cde.pdf,Evaluating gender portrayal in Bangladeshi TV,2017 +92,Adience,adience,40.0505672,-75.37109326,Eastern University,edu,026e4ee480475e63ae68570d73388f8dfd4b4cde,citation,http://pdfs.semanticscholar.org/026e/4ee480475e63ae68570d73388f8dfd4b4cde.pdf,Evaluating gender portrayal in Bangladeshi TV,2017 +93,Adience,adience,42.3583961,-71.09567788,MIT,edu,026e4ee480475e63ae68570d73388f8dfd4b4cde,citation,http://pdfs.semanticscholar.org/026e/4ee480475e63ae68570d73388f8dfd4b4cde.pdf,Evaluating gender portrayal in Bangladeshi TV,2017 +94,Adience,adience,-22.8148374,-47.0647708,University of Campinas (UNICAMP),edu,b161d261fabb507803a9e5834571d56a3b87d147,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913,Gender 
recognition from face images using a geometric descriptor,2017 +95,Adience,adience,58.38131405,26.72078081,University of Tartu,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +96,Adience,adience,41.3868913,2.16352385,University of Barcelona,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +97,Adience,adience,31.76909325,117.17795091,Anhui University,edu,b910590a0eb191d03e1aedb3d55c905129e92e6b,citation,http://doi.acm.org/10.1145/2808492.2808570,Robust gender classification on unconstrained face images,2015 +98,Adience,adience,40.0044795,116.370238,Chinese Academy of Sciences,edu,b910590a0eb191d03e1aedb3d55c905129e92e6b,citation,http://doi.acm.org/10.1145/2808492.2808570,Robust gender classification on unconstrained face images,2015 +99,Adience,adience,43.7743911,-79.50481085,York University,edu,ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,citation,https://arxiv.org/pdf/1706.04277.pdf,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,2017 +100,Adience,adience,27.18794105,31.17009498,Assiut University,edu,ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,citation,https://arxiv.org/pdf/1706.04277.pdf,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,2017
diff --git a/site/datasets/final/aflw.csv b/site/datasets/final/aflw.csv new file mode 100644 index 00000000..29cfe134 --- /dev/null +++ b/site/datasets/final/aflw.csv @@ -0,0 +1,212 @@
+index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,AFLW,aflw,0.0,0.0,,,a74251efa970b92925b89eeef50a5e37d9281ad0,main,http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf,"Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization",2011 +1,AFLW,aflw,42.2942142,-83.71003894,University of Michigan,edu,860588fafcc80c823e66429fadd7e816721da42a,citation,https://arxiv.org/pdf/1804.04412.pdf,Unsupervised Discovery of Object Landmarks as Structural Representations,2018 +2,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,433a6d6d2a3ed8a6502982dccc992f91d665b9b3,citation,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf,Transferring Landmark Annotations for Cross-Dataset Face Alignment,2014 +3,AFLW,aflw,40.00229045,116.32098908,Tsinghua University,edu,433a6d6d2a3ed8a6502982dccc992f91d665b9b3,citation,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf,Transferring Landmark Annotations for Cross-Dataset Face Alignment,2014 +4,AFLW,aflw,-27.47715625,153.02841004,Queensland University of Technology,edu,6342a4c54835c1e14159495373ab18b4233d2d9b,citation,http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf,Towards Pose-robust Face Recognition on Video,2014 +5,AFLW,aflw,39.993008,116.329882,SenseTime,company,38183fe28add21693729ddeaf3c8a90a2d5caea3,citation,http://arxiv.org/abs/1706.09876,Scale-Aware Face Detection,2017 +6,AFLW,aflw,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,2c17d36bab56083293456fe14ceff5497cc97d75,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf,Unconstrained Face Alignment via Cascaded Compositional Learning,2016 +7,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2c17d36bab56083293456fe14ceff5497cc97d75,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf,Unconstrained Face Alignment via Cascaded Compositional Learning,2016 +8,AFLW,aflw,47.05821,15.46019568,Graz University of Technology,edu,4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8,citation,http://pdfs.semanticscholar.org/4ab1/0174a4f98f7e2da7cf6ccfeb9bc64c8e7da8.pdf,Efficient Metric Learning for Real-World Face Recognition,2013 +9,AFLW,aflw,22.53521465,113.9315911,Shenzhen University,edu,32ecbbd76fdce249f9109594eee2d52a1cafdfc7,citation,http://pdfs.semanticscholar.org/32ec/bbd76fdce249f9109594eee2d52a1cafdfc7.pdf,Object Specific Deep Learning Feature and Its Application to Face Detection,2016 +10,AFLW,aflw,52.9387428,-1.20029569,University of Nottingham,edu,32ecbbd76fdce249f9109594eee2d52a1cafdfc7,citation,http://pdfs.semanticscholar.org/32ec/bbd76fdce249f9109594eee2d52a1cafdfc7.pdf,Object Specific Deep Learning Feature and Its Application to Face Detection,2016 +11,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,4e6c17966efae956133bf8f22edeffc24a0470c1,citation,http://pdfs.semanticscholar.org/4e6c/17966efae956133bf8f22edeffc24a0470c1.pdf,Face Classification: A Specialized Benchmark Study,2016 +12,AFLW,aflw,22.15263985,113.56803206,Macau University of Science and Technology,edu,4e6c17966efae956133bf8f22edeffc24a0470c1,citation,http://pdfs.semanticscholar.org/4e6c/17966efae956133bf8f22edeffc24a0470c1.pdf,Face Classification: A Specialized Benchmark Study,2016 +13,AFLW,aflw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,4e6c17966efae956133bf8f22edeffc24a0470c1,citation,http://pdfs.semanticscholar.org/4e6c/17966efae956133bf8f22edeffc24a0470c1.pdf,Face Classification: A Specialized Benchmark Study,2016 +14,AFLW,aflw,37.4102193,-122.05965487,Carnegie Mellon University,edu,f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53,citation,https://arxiv.org/pdf/1707.05653.pdf,Faster than Real-Time Facial Alignment: A 3D Spatial Transformer Network Approach in Unconstrained Poses,2017 +15,AFLW,aflw,29.6328784,-82.3490133,University of Florida,edu,441bf5f7fe7d1a3939d8b200eca9b4bb619449a9,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sundararajan_Head_Pose_Estimation_2015_CVPR_paper.pdf,Head pose estimation in the wild using approximate view manifolds,2015 +16,AFLW,aflw,37.4102193,-122.05965487,Carnegie Mellon University,edu,1ca815327e62c70f4ee619a836e05183ef629567,citation,http://www.humansensing.cs.cmu.edu/sites/default/files/Xiong_Global_Supervised_Descent_2015_CVPR_paper.pdf,Global supervised descent method,2015 +17,AFLW,aflw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,6495d989fe33b19d2b7755f9077d8b5bf3190151,citation,https://arxiv.org/pdf/1803.07835.pdf,Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network,2018 +18,AFLW,aflw,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,ccebd3bf069f5c73ea2ccc5791976f894bc6023d,citation,https://doi.org/10.1109/ICPR.2016.7900186,Face detection based on deep convolutional neural networks exploiting incremental facial part learning,2016 
+19,AFLW,aflw,51.24303255,-0.59001382,University of Surrey,edu,c146aa6d56233ce700032f1cb179700778557601,citation,https://arxiv.org/pdf/1708.07199.pdf,3D Morphable Models as Spatial Transformer Networks,2017 +20,AFLW,aflw,53.94540365,-1.03138878,University of York,edu,c146aa6d56233ce700032f1cb179700778557601,citation,https://arxiv.org/pdf/1708.07199.pdf,3D Morphable Models as Spatial Transformer Networks,2017 +21,AFLW,aflw,51.24303255,-0.59001382,University of Surrey,edu,438e7999c937b94f0f6384dbeaa3febff6d283b6,citation,https://arxiv.org/pdf/1705.02402v2.pdf,"Face Detection, Bounding Box Aggregation and Pose Estimation for Robust Facial Landmark Localisation in the Wild",2017 +22,AFLW,aflw,31.4854255,120.2739581,Jiangnan University,edu,438e7999c937b94f0f6384dbeaa3febff6d283b6,citation,https://arxiv.org/pdf/1705.02402v2.pdf,"Face Detection, Bounding Box Aggregation and Pose Estimation for Robust Facial Landmark Localisation in the Wild",2017 +23,AFLW,aflw,37.4102193,-122.05965487,Carnegie Mellon University,edu,b1fdd4ae17d82612cefd4e78b690847b071379d3,citation,https://pdfs.semanticscholar.org/4fc5/416b6c7173d3462e5be796bda3ad8d5645a1.pdf,Supervised Descent Method,2015 +24,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,a5f35880477ae82902c620245e258cf854c09be9,citation,http://doi.org/10.1016/j.imavis.2013.12.004,Face detection by structural models,2014 +25,AFLW,aflw,51.24303255,-0.59001382,University of Surrey,edu,96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,citation,https://arxiv.org/pdf/1611.05396.pdf,Dynamic Attention-Controlled Cascaded Shape Regression Exploiting Training Data Augmentation and Fuzzy-Set Sample Weighting,2017 +26,AFLW,aflw,31.4854255,120.2739581,Jiangnan University,edu,96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,citation,https://arxiv.org/pdf/1611.05396.pdf,Dynamic Attention-Controlled Cascaded Shape Regression Exploiting Training Data Augmentation and Fuzzy-Set Sample Weighting,2017 +27,AFLW,aflw,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,d3b0839324d0091e70ce34f44c979b9366547327,citation,https://arxiv.org/pdf/1804.10743.pdf,Precise Box Score: Extract More Information from Datasets to Improve the Performance of Face Detection,2018 +28,AFLW,aflw,47.5612651,7.5752961,University of Basel,edu,7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794,citation,http://doi.org/10.1007/s11263-016-0967-5,Markov Chain Monte Carlo for Automated Face Image Analysis,2016 +29,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +30,AFLW,aflw,38.95187,-77.363259,"Noblis, Falls Church, VA, U.S.A.",company,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +31,AFLW,aflw,51.7534538,-1.25400997,University of Oxford,edu,7117ed0be436c0291bc6fb6ea6db18de74e2464a,citation,https://pdfs.semanticscholar.org/7117/ed0be436c0291bc6fb6ea6db18de74e2464a.pdf,Spatial Transformations,2017 +32,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,8a3c5507237957d013a0fe0f082cab7f757af6ee,citation,http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf,Facial Landmark Detection by Deep Multi-task Learning,2014 +33,AFLW,aflw,47.05821,15.46019568,Graz University of 
Technology,edu,5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48,citation,http://pdfs.semanticscholar.org/5c86/72c0d2f28fd5d2d2c4b9818fcff43fb01a48.pdf,Robust Face Detection by Simple Means,2012 +34,AFLW,aflw,30.642769,104.06751175,"Sichuan University, Chengdu",edu,5cbe1445d683d605b31377881ac8540e1d17adf0,citation,https://arxiv.org/pdf/1509.06161.pdf,On 3D face reconstruction via cascaded regression in shape space,2017 +35,AFLW,aflw,51.24303255,-0.59001382,University of Surrey,edu,3c6cac7ecf546556d7c6050f7b693a99cc8a57b3,citation,https://pdfs.semanticscholar.org/3c6c/ac7ecf546556d7c6050f7b693a99cc8a57b3.pdf,Robust facial landmark detection in the wild,2016 +36,AFLW,aflw,22.53521465,113.9315911,Shenzhen University,edu,287de191c49a3caa38ad7594093045dfba1eb420,citation,https://doi.org/10.23919/MVA.2017.7986829,Object specific deep feature and its application to face detection,2017 +37,AFLW,aflw,52.9387428,-1.20029569,University of Nottingham,edu,287de191c49a3caa38ad7594093045dfba1eb420,citation,https://doi.org/10.23919/MVA.2017.7986829,Object specific deep feature and its application to face detection,2017 +38,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,2f04ba0f74df046b0080ca78e56898bd4847898b,citation,http://arxiv.org/abs/1407.4023,Aggregate channel features for multi-view face detection,2014 +39,AFLW,aflw,33.6431901,-117.84016494,"University of California, Irvine",edu,65126e0b1161fc8212643b8ff39c1d71d262fbc1,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ghiasi_Occlusion_Coherence_Localizing_2014_CVPR_paper.pdf,Occlusion Coherence: Localizing Occluded Faces with a Hierarchical Deformable Part Model,2014 +40,AFLW,aflw,38.99203005,-76.9461029,University of Maryland College Park,edu,4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,citation,http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf,Video-Based Face Recognition Using the Intra/Extra-Personal Difference Dictionary,2014 +41,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,citation,http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf,Video-Based Face Recognition Using the Intra/Extra-Personal Difference Dictionary,2014 +42,AFLW,aflw,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,citation,https://doi.org/10.1109/BTAS.2017.8272722,Cross-pose landmark localization using multi-dropout framework,2017 +43,AFLW,aflw,32.87935255,-117.23110049,"University of California, San Diego",edu,a1e07c31184d3728e009d4d1bebe21bf9fe95c8e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900056,"On looking at faces in an automobile: Issues, algorithms and evaluation on naturalistic driving dataset",2016 +44,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf,Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment,2017 +45,AFLW,aflw,40.00229045,116.32098908,Tsinghua University,edu,329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf,Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment,2017 +46,AFLW,aflw,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,04661729f0ff6afe4b4d6223f18d0da1d479accf,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.419,From Facial Parts Responses to Face Detection: A Deep Learning Approach,2015 +47,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,04661729f0ff6afe4b4d6223f18d0da1d479accf,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.419,From Facial Parts Responses to Face Detection: A Deep Learning Approach,2015 +48,AFLW,aflw,17.4454957,78.34854698,International Institute of Information Technology,edu,185263189a30986e31566394680d6d16b0089772,citation,https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf,Efficient Annotation of Objects for Video Analysis,2018 +49,AFLW,aflw,35.77184965,-78.67408695,North Carolina State University,edu,9bd35145c48ce172b80da80130ba310811a44051,citation,https://arxiv.org/pdf/1606.00850.pdf,Face Detection with End-to-End Integration of a ConvNet and a 3D Model,2016 +50,AFLW,aflw,39.9922379,116.30393816,Peking University,edu,9bd35145c48ce172b80da80130ba310811a44051,citation,https://arxiv.org/pdf/1606.00850.pdf,Face Detection with End-to-End Integration of a ConvNet and a 3D Model,2016 +51,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,45e616093a92e5f1e61a7c6037d5f637aa8964af,citation,http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf,Fine-grained evaluation on face detection in the wild,2015 +52,AFLW,aflw,32.7283683,-97.11201835,University of Texas at Arlington,edu,411dc8874fd7b3a9a4c1fd86bb5b583788027776,citation,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf,Direct Shape Regression Networks for End-to-End Face Alignment,2018 +53,AFLW,aflw,34.1235825,108.83546,Xidian University,edu,411dc8874fd7b3a9a4c1fd86bb5b583788027776,citation,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf,Direct Shape Regression Networks for End-to-End Face Alignment,2018 +54,AFLW,aflw,42.36782045,-71.12666653,Harvard University,edu,3cb057a24a8adba6fe964b5d461ba4e4af68af14,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6701391,Perceptual Annotation: Measuring Human Vision to Improve Computer Vision,2014 +55,AFLW,aflw,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,citation,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf,Towards Arbitrary-View Face Alignment by Recommendation Trees,2015 +56,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,citation,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf,Towards Arbitrary-View Face Alignment by Recommendation Trees,2015 +57,AFLW,aflw,39.329053,-76.619425,Johns Hopkins University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +58,AFLW,aflw,40.47913175,-74.43168868,Rutgers University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +59,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +60,AFLW,aflw,49.10184375,8.4331256,Karlsruhe Institute of 
Technology,edu,8ee5b1c9fb0bded3578113c738060290403ed472,citation,https://infoscience.epfl.ch/record/200452/files/wacv2014-RGE.pdf,Extending explicit shape regression with mixed feature channels and pose priors,2014 +61,AFLW,aflw,34.0224149,-118.28634407,University of Southern California,edu,43e99b76ca8e31765d4571d609679a689afdc99e,citation,http://arxiv.org/abs/1709.00536,Learning Dense Facial Correspondences in Unconstrained Images,2017 +62,AFLW,aflw,38.88140235,121.52281098,Dalian University of Technology,edu,f074e86e003d5b7a3b6e1780d9c323598d93f3bc,citation,http://pdfs.semanticscholar.org/f074/e86e003d5b7a3b6e1780d9c323598d93f3bc.pdf,Characteristic Number: Theory and Its Application to Shape Analysis,2014 +63,AFLW,aflw,38.99203005,-76.9461029,University of Maryland College Park,edu,1389ba6c3ff34cdf452ede130c738f37dca7e8cb,citation,http://pdfs.semanticscholar.org/1389/ba6c3ff34cdf452ede130c738f37dca7e8cb.pdf,A Convolution Tree with Deconvolution Branches: Exploiting Geometric Relationships for Single Shot Keypoint Detection,2017 +64,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,85674b1b6007634f362cbe9b921912b697c0a32c,citation,http://pdfs.semanticscholar.org/8567/4b1b6007634f362cbe9b921912b697c0a32c.pdf,Optimizing Facial Landmark Detection by Facial Attribute Learning,2014 +65,AFLW,aflw,51.7534538,-1.25400997,University of Oxford,edu,8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7,citation,https://arxiv.org/pdf/1809.02169.pdf,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,2018 +66,AFLW,aflw,38.99203005,-76.9461029,University of Maryland College Park,edu,f7824758800a7b1a386db5bd35f84c81454d017a,citation,https://arxiv.org/pdf/1702.05085.pdf,KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors,2017 +67,AFLW,aflw,17.4454957,78.34854698,International Institute of Information Technology,edu,156cd2a0e2c378e4c3649a1d046cd080d3338bca,citation,http://pdfs.semanticscholar.org/156c/d2a0e2c378e4c3649a1d046cd080d3338bca.pdf,Exemplar based approaches on Face Fiducial Detection and Frontalization,2017 +68,AFLW,aflw,39.7275037,39.47127034,Firat University,edu,5cfbeae360398de9e20e4165485837bd42b93217,citation,http://pdfs.semanticscholar.org/5cfb/eae360398de9e20e4165485837bd42b93217.pdf,Comparison Of Hog (Histogram of Oriented Gradients) and Haar Cascade Algorithms with a Convolutional Neural Network Based Face Detection Approaches,2017 +69,AFLW,aflw,29.5084174,106.57858552,Chongqing University,edu,a065080353d18809b2597246bb0b48316234c29a,citation,http://pdfs.semanticscholar.org/a065/080353d18809b2597246bb0b48316234c29a.pdf,FHEDN: A based on context modeling Feature Hierarchy Encoder-Decoder Network for face detection,2017 +70,AFLW,aflw,52.22165395,21.00735776,Warsaw University of Technology,edu,f27b8b8f2059248f77258cf8595e9434cf0b0228,citation,https://arxiv.org/pdf/1706.01789.pdf,Deep Alignment Network: A Convolutional Neural Network for Robust Face Alignment,2017 +71,AFLW,aflw,53.46600455,-2.23300881,University of Manchester,edu,68c1090f912b69b76437644dd16922909dd40d60,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6987312,Robust and Accurate Shape Model Matching Using Random Forest Regression-Voting,2012 +72,AFLW,aflw,32.77824165,34.99565673,Open University of Israel,edu,62e913431bcef5983955e9ca160b91bb19d9de42,citation,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf,Facial Landmark Detection with Tweaked Convolutional Neural Networks,2015 
+73,AFLW,aflw,50.0764296,14.41802312,Czech Technical University,edu,f4ba07d2ae6c9673502daf50ee751a5e9262848f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284810,Real-time multi-view facial landmark detector learned by the structured output SVM,2015 +74,AFLW,aflw,35.6924853,139.7582533,"National Institute of Informatics, Japan",edu,f4ba07d2ae6c9673502daf50ee751a5e9262848f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284810,Real-time multi-view facial landmark detector learned by the structured output SVM,2015 +75,AFLW,aflw,40.00229045,116.32098908,Tsinghua University,edu,204f1cf56794bb23f9516b5f225a6ae00d3d30b8,citation,https://doi.org/10.1109/JSYST.2015.2418680,An AdaBoost-Based Face Detection System Using Parallel Configurable Architecture With Optimized Computation,2017 +76,AFLW,aflw,30.44235995,-84.29747867,Florida State University,edu,1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,citation,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf,Face Detection with a 3D Model,2014 +77,AFLW,aflw,39.00041165,-77.10327775,National Institutes of Health,edu,1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,citation,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf,Face Detection with a 3D Model,2014 +78,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,37ce1d3a6415d6fc1760964e2a04174c24208173,citation,http://www.cse.msu.edu/~liuxm/publication/Jourabloo_Liu_ICCV2015.pdf,Pose-Invariant 3D Face Alignment,2015 +79,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,ec8ec2dfd73cf3667f33595fef84c95c42125945,citation,https://arxiv.org/pdf/1707.06286.pdf,Pose-Invariant Face Alignment with a Single CNN,2017 +80,AFLW,aflw,43.07982815,-89.43066425,University of Wisconsin Madison,edu,2e091b311ac48c18aaedbb5117e94213f1dbb529,citation,http://pdfs.semanticscholar.org/b1a1/a049f1d78f6e3d072236237c467292ccd537.pdf,Collaborative Facial Landmark Localization for Transferring Annotations Across Datasets,2014 +81,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,citation,http://doi.org/10.1007/s11263-017-1012-z,Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting,2017 +82,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,abdd17e411a7bfe043f280abd4e560a04ab6e992,citation,https://arxiv.org/pdf/1803.00839.pdf,Pose-Robust Face Recognition via Deep Residual Equivariant Mapping,2018 +83,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,085ceda1c65caf11762b3452f87660703f914782,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Jourabloo_Large-Pose_Face_Alignment_CVPR_2016_paper.pdf,Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting,2016 +84,AFLW,aflw,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,fcd3d557863e71dd5ce8bcf918adbe22ec59e62f,citation,http://doi.acm.org/10.1145/2502081.2502148,Facial landmark localization based on hierarchical pose regression with cascaded random ferns,2013 +85,AFLW,aflw,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,c00df53bd46f78ae925c5768d46080159d4ef87d,citation,https://arxiv.org/pdf/1707.08105.pdf,Learning Bag-of-Features Pooling for Deep Convolutional Neural Networks,2017 +86,AFLW,aflw,31.4854255,120.2739581,Jiangnan University,edu,d22dd4a6752a5ffa40aebd260ff63d2c2a9e1da1,citation,https://arxiv.org/pdf/1811.05295.pdf,Pose Invariant 3D Face Reconstruction,2018 +87,AFLW,aflw,28.59899755,-81.19712501,University of 
Central Florida,edu,c4fb2de4a5dc28710d9880aece321acf68338fde,citation,https://arxiv.org/pdf/1801.09092.pdf,Interactive Generative Adversarial Networks for Facial Expression Generation in Dyadic Interactions,2018 +88,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,c94b3a05f6f41d015d524169972ae8fd52871b67,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Yan_The_Fastest_Deformable_2014_CVPR_paper.pdf,The Fastest Deformable Part Model for Object Detection,2014 +89,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2a171f8d14b6b8735001a11c217af9587d095848,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414,Learning Social Relation Traits from Face Images,2015 +90,AFLW,aflw,23.09461185,113.28788994,Sun Yat-Sen University,edu,4c078c2919c7bdc26ca2238fa1a79e0331898b56,citation,http://pdfs.semanticscholar.org/4c07/8c2919c7bdc26ca2238fa1a79e0331898b56.pdf,Unconstrained Facial Landmark Localization with Backbone-Branches Fully-Convolutional Networks,2015 +91,AFLW,aflw,52.9387428,-1.20029569,University of Nottingham,edu,721e5ba3383b05a78ef1dfe85bf38efa7e2d611d,citation,http://pdfs.semanticscholar.org/74f1/9d0986c9d39aabb359abaa2a87a248a48deb.pdf,"BULAT, TZIMIROPOULOS: CONVOLUTIONAL AGGREGATION OF LOCAL EVIDENCE 1 Convolutional aggregation of local evidence for large pose face alignment",2016 +92,AFLW,aflw,47.5612651,7.5752961,University of Basel,edu,0c20fd90d867fe1be2459223a3cb1a69fa3d44bf,citation,http://pdfs.semanticscholar.org/0c20/fd90d867fe1be2459223a3cb1a69fa3d44bf.pdf,A Monte Carlo Strategy to Integrate Detection and Model-Based Face Analysis,2013 +93,AFLW,aflw,39.9041999,116.4073963,"Beijing FaceAll Co., Beijing, China",edu,c7cd490e43ee4ff81e8f86f790063695369c2830,citation,https://doi.org/10.1109/VCIP.2016.7805472,Use fast R-CNN and cascade structure for face detection,2016 +94,AFLW,aflw,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,c7cd490e43ee4ff81e8f86f790063695369c2830,citation,https://doi.org/10.1109/VCIP.2016.7805472,Use fast R-CNN and cascade structure for face detection,2016 +95,AFLW,aflw,47.05821,15.46019568,Graz University of Technology,edu,96a9ca7a8366ae0efe6b58a515d15b44776faf6e,citation,https://arxiv.org/pdf/1609.00129.pdf,Grid Loss: Detecting Occluded Faces,2016 +96,AFLW,aflw,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,3b73f8a2b39751efb7d7b396bf825af2aaadee24,citation,https://arxiv.org/pdf/1712.01066.pdf,Connecting Pixels to Privacy and Utility: Automatic Redaction of Private Information in Images,2017 +97,AFLW,aflw,47.5612651,7.5752961,University of Basel,edu,043efe5f465704ced8d71a067d2b9d5aa5b59c29,citation,https://pdfs.semanticscholar.org/000a/c6b0865c79bcf0d6f7f069b3abfe229e1462.pdf,Occlusion-aware 3D Morphable Face Models,2016 +98,AFLW,aflw,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,ede5982980aa76deae8f9dc5143a724299d67742,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081396,Lightweight two-stream convolutional face detection,2017 +99,AFLW,aflw,51.7534538,-1.25400997,University of Oxford,edu,a3d0ebb50d49116289fb176d28ea98a92badada6,citation,https://pdfs.semanticscholar.org/a3d0/ebb50d49116289fb176d28ea98a92badada6.pdf,Unsupervised Learning of Object Landmarks through Conditional Image Generation,2018 +100,AFLW,aflw,55.94951105,-3.19534913,University of Edinburgh,edu,a3d0ebb50d49116289fb176d28ea98a92badada6,citation,https://pdfs.semanticscholar.org/a3d0/ebb50d49116289fb176d28ea98a92badada6.pdf,Unsupervised 
Learning of Object Landmarks through Conditional Image Generation,2018 +101,AFLW,aflw,51.24303255,-0.59001382,University of Surrey,edu,ed07856461da6c7afa4f1782b5b607b45eebe9f6,citation,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf,D Morphable Models as Spatial Transformer Networks,2017 +102,AFLW,aflw,53.94540365,-1.03138878,University of York,edu,ed07856461da6c7afa4f1782b5b607b45eebe9f6,citation,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf,D Morphable Models as Spatial Transformer Networks,2017 +103,AFLW,aflw,37.4173931,-121.9475721,"ARM, Inc.",company,0974677f59e78649a40f0a1d85735410d21b906a,citation,https://doi.org/10.1109/ASPDAC.2017.7858282,A real-time 17-scale object detection accelerator with adaptive 2000-stage classification in 65nm CMOS,2017 +104,AFLW,aflw,30.19331415,120.11930822,Zhejiang University,edu,0974677f59e78649a40f0a1d85735410d21b906a,citation,https://doi.org/10.1109/ASPDAC.2017.7858282,A real-time 17-scale object detection accelerator with adaptive 2000-stage classification in 65nm CMOS,2017 +105,AFLW,aflw,33.30715065,-111.67653157,Arizona State University,edu,0974677f59e78649a40f0a1d85735410d21b906a,citation,https://doi.org/10.1109/ASPDAC.2017.7858282,A real-time 17-scale object detection accelerator with adaptive 2000-stage classification in 65nm CMOS,2017 +106,AFLW,aflw,23.04436505,113.36668458,Guangzhou University,edu,293d69d042fe9bc4fea256c61915978ddaf7cc92,citation,https://doi.org/10.1007/978-981-10-7302-1_6,Face Recognition by Coarse-to-Fine Landmark Regression with Application to ATM Surveillance,2017 +107,AFLW,aflw,23.09461185,113.28788994,Sun Yat-Sen University,edu,293d69d042fe9bc4fea256c61915978ddaf7cc92,citation,https://doi.org/10.1007/978-981-10-7302-1_6,Face Recognition by Coarse-to-Fine Landmark Regression with Application to ATM Surveillance,2017 +108,AFLW,aflw,51.24303255,-0.59001382,University of Surrey,edu,56e25056153a15eae2a6b10c109f812d2b753cee,citation,https://arxiv.org/pdf/1711.06753.pdf,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,2017 +109,AFLW,aflw,31.4854255,120.2739581,Jiangnan University,edu,56e25056153a15eae2a6b10c109f812d2b753cee,citation,https://arxiv.org/pdf/1711.06753.pdf,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,2017 +110,AFLW,aflw,-33.8809651,151.20107299,University of Technology Sydney,edu,ebc2a3e8a510c625353637e8e8f07bd34410228f,citation,https://doi.org/10.1109/TIP.2015.2502485,Dual Sparse Constrained Cascade Regression for Robust Face Alignment,2016 +111,AFLW,aflw,38.99203005,-76.9461029,University of Maryland College Park,edu,b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,citation,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf,"HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition",2016 +112,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,citation,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf,"HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition",2016 +113,AFLW,aflw,47.5612651,7.5752961,University of Basel,edu,5789f8420d8f15e7772580ec373112f864627c4b,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.417,Efficient Global Illumination for Morphable Models,2017 +114,AFLW,aflw,51.4293086,-0.2684044,Kingston 
University,edu,01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Jackson_Large_Pose_3D_ICCV_2017_paper.pdf,Large Pose 3D Face Reconstruction from a Single Image via Direct Volumetric CNN Regression,2017 +115,AFLW,aflw,52.9387428,-1.20029569,University of Nottingham,edu,01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Jackson_Large_Pose_3D_ICCV_2017_paper.pdf,Large Pose 3D Face Reconstruction from a Single Image via Direct Volumetric CNN Regression,2017 +116,AFLW,aflw,39.9808333,116.34101249,Beihang University,edu,86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,citation,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,Attentional Alignment Networks,2018 +117,AFLW,aflw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,citation,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,Attentional Alignment Networks,2018 +118,AFLW,aflw,32.7283683,-97.11201835,University of Texas at Arlington,edu,86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,citation,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,Attentional Alignment Networks,2018 +119,AFLW,aflw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,52d7eb0fbc3522434c13cc247549f74bb9609c5d,citation,https://arxiv.org/pdf/1511.06523.pdf,WIDER FACE: A Face Detection Benchmark,2016 +120,AFLW,aflw,32.0565957,118.77408833,Nanjing University,edu,b8978a5251b6e341a1171e4fd9177aec1432dd3a,citation,https://doi.org/10.1016/j.image.2016.04.004,FaceHunter: A multi-task convolutional neural network based face detector,2016 +121,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,citation,https://arxiv.org/pdf/1511.07212.pdf,Face Alignment in Full Pose Range: A 3D Total Solution,2017 +122,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,citation,https://arxiv.org/pdf/1511.07212.pdf,Face Alignment in Full Pose Range: A 3D Total Solution,2017 +123,AFLW,aflw,51.6091578,-3.97934429,Swansea University,edu,d115c4a66d765fef596b0b171febca334cea15b5,citation,http://pdfs.semanticscholar.org/d115/c4a66d765fef596b0b171febca334cea15b5.pdf,Combining Stacked Denoising Autoencoders and Random Forests for Face Detection,2016 +124,AFLW,aflw,38.88140235,121.52281098,Dalian University of Technology,edu,19705579b8e7d955092ef54a22f95f557a455338,citation,https://doi.org/10.1109/ICIP.2014.7025277,Fiducial facial point extraction with cross ratio,2014 +125,AFLW,aflw,51.7534538,-1.25400997,University of Oxford,edu,79eb06c8acce1feef4a8654287d9cf5081e19600,citation,https://arxiv.org/pdf/1808.06882.pdf,Self-supervised learning of a facial attribute embedding from video,2018 +126,AFLW,aflw,37.4102193,-122.05965487,Carnegie Mellon University,edu,87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,citation,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,2016 +127,AFLW,aflw,34.0224149,-118.28634407,University of Southern California,edu,87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,citation,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,2016 +128,AFLW,aflw,40.0044795,116.370238,Chinese Academy of 
Sciences,edu,7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.255,Robust FEC-CNN: A High Accuracy Facial Landmark Detection System,2017 +129,AFLW,aflw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.255,Robust FEC-CNN: A High Accuracy Facial Landmark Detection System,2017 +130,AFLW,aflw,40.00229045,116.32098908,Tsinghua University,edu,3fb26f3abcf0d287243646426cd5ddeee33624d4,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.376,Joint Training of Cascaded CNN for Face Detection,2016 +131,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,055cd8173536031e189628c879a2acad6cf2a5d0,citation,https://doi.org/10.1109/BTAS.2017.8272740,Fast multi-view face alignment via multi-task auto-encoders,2017 +132,AFLW,aflw,31.9078499,34.81334092,Weizmann Institute of Science,edu,d4c2d26523f577e2d72fc80109e2540c887255c8,citation,http://pdfs.semanticscholar.org/d4c2/d26523f577e2d72fc80109e2540c887255c8.pdf,Face-space Action Recognition by Face-Object Interactions,2016 +133,AFLW,aflw,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,3251f40ed1113d592c61d2017e67beca66e678bb,citation,https://doi.org/10.1007/978-3-319-65172-9_17,Improving Face Pose Estimation Using Long-Term Temporal Averaging for Stochastic Optimization,2017 +134,AFLW,aflw,56.46255985,84.95565495,Tomsk Polytechnic University,edu,17ded725602b4329b1c494bfa41527482bf83a6f,citation,http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf,Compact Convolutional Neural Network Cascade for Face Detection,2015 +135,AFLW,aflw,40.47913175,-74.43168868,Rutgers University,edu,c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd,citation,https://arxiv.org/pdf/1609.02825.pdf,Track Facial Points in Unconstrained Videos,2016 +136,AFLW,aflw,30.44235995,-84.29747867,Florida State University,edu,42ea8a96eea023361721f0ea34264d3d0fc49ebd,citation,https://arxiv.org/pdf/1608.04695.pdf,Parameterized Principal Component Analysis,2018 +137,AFLW,aflw,-27.49741805,153.01316956,University of Queensland,edu,de79437f74e8e3b266afc664decf4e6e4bdf34d7,citation,https://doi.org/10.1109/IVCNZ.2016.7804415,To face or not to face: Towards reducing false positive of face detection,2016 +138,AFLW,aflw,42.0551164,-87.67581113,Northwestern University,edu,7c953868cd51f596300c8231192d57c9c514ae17,citation,http://courses.cs.washington.edu/courses/cse590v/13au/CVPR13_FaceDetection.pdf,Detecting and Aligning Faces by Image Retrieval,2013 +139,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,19a9f658ea14701502d169dc086651b1d9b2a8ea,citation,http://www.cbsr.ia.ac.cn/users/zlei/papers/JJYan-FG2013.pdf,Structural models for face detection,2013 +140,AFLW,aflw,-27.47715625,153.02841004,Queensland University of Technology,edu,be632b206f1cd38eab0c01c5f2004d1e8fc72880,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607601,Gradual training of cascaded shape regression for facial landmark localization and pose estimation,2013 +141,AFLW,aflw,33.6431901,-117.84016494,"University of California, Irvine",edu,0e986f51fe45b00633de9fd0c94d082d2be51406,citation,http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf,"Face detection, pose estimation, and landmark localization in the wild",2012 +142,AFLW,aflw,39.9586652,116.30971281,Beijing Institute of 
Technology,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +143,AFLW,aflw,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +144,AFLW,aflw,1.2962018,103.77689944,National University of Singapore,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +145,AFLW,aflw,41.21002475,-73.80407056,IBM Thomas J. Watson Research Center,company,eb87151fd2796ff5b4bbcf1906d41d53ac6c5595,citation,https://doi.org/10.1109/ICPR.2016.7899719,Enhanced face detection using body part detections for wearable cameras,2016 +146,AFLW,aflw,29.5357046,106.60482474,Chongqing University of Posts and Telecommunications,edu,35d272877b178aa97c678e3fcbb619ff512af4c2,citation,https://doi.org/10.1109/SMC.2017.8122743,A multi-scale fusion convolutional neural network for face detection,2017 +147,AFLW,aflw,52.7663577,-1.2292461,Loughborough University,edu,9e8f95503bebdfb623d4e5b51347f72677d89d99,citation,https://pdfs.semanticscholar.org/9e8f/95503bebdfb623d4e5b51347f72677d89d99.pdf,Multi-dimensional local binary pattern texture descriptors and their application for medical image analysis,2014 +148,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,492f41e800c52614c5519f830e72561db205e86c,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lv_A_Deep_Regression_CVPR_2017_paper.pdf,A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection,2017 +149,AFLW,aflw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,492f41e800c52614c5519f830e72561db205e86c,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lv_A_Deep_Regression_CVPR_2017_paper.pdf,A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection,2017 +150,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,c6382de52636705be5898017f2f8ed7c70d7ae96,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,Unconstrained face detection: State of the art baseline and challenges,2015 +151,AFLW,aflw,38.95187,-77.363259,"Noblis, Falls Church, VA, U.S.A.",company,c6382de52636705be5898017f2f8ed7c70d7ae96,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,Unconstrained face detection: State of the art baseline and challenges,2015 +152,AFLW,aflw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,b11bb6bd63ee6f246d278dd4edccfbe470263803,citation,http://pdfs.semanticscholar.org/b11b/b6bd63ee6f246d278dd4edccfbe470263803.pdf,Joint Voxel and Coordinate Regression for Accurate 3D Facial Landmark Localization,2018 +153,AFLW,aflw,22.53521465,113.9315911,Shenzhen University,edu,66dcd855a6772d2731b45cfdd75f084327b055c2,citation,http://pdfs.semanticscholar.org/66dc/d855a6772d2731b45cfdd75f084327b055c2.pdf,Quality Classified Image Analysis with Application to Face Detection and Recognition,2018 +154,AFLW,aflw,38.5336349,-121.79077264,"University of California, Davis",edu,fdf8e293a7618f560e76bd83e3c40a0788104547,citation,https://arxiv.org/pdf/1704.04023.pdf,Interspecies Knowledge Transfer for Facial Keypoint Detection,2017 +155,AFLW,aflw,30.19331415,120.11930822,Zhejiang 
University,edu,fdf8e293a7618f560e76bd83e3c40a0788104547,citation,https://arxiv.org/pdf/1704.04023.pdf,Interspecies Knowledge Transfer for Facial Keypoint Detection,2017 +156,AFLW,aflw,51.49887085,-0.17560797,Imperial College London,edu,38cbb500823057613494bacd0078aa0e57b30af8,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.252,Deep Face Deblurring,2017 +157,AFLW,aflw,22.2081469,114.25964115,University of Hong Kong,edu,fb87045600da73b07f0757f345a937b1c8097463,citation,https://pdfs.semanticscholar.org/5c54/2fef80a35a4f930e5c82040b52c58e96ce87.pdf,Reflective Regression of 2D-3D Face Shape Across Large Pose,2016 +158,AFLW,aflw,52.2380139,6.8566761,University of Twente,edu,71b07c537a9e188b850192131bfe31ef206a39a0,citation,http://pdfs.semanticscholar.org/71b0/7c537a9e188b850192131bfe31ef206a39a0.pdf,300 Faces In-The-Wild Challenge: database and results,2016 +159,AFLW,aflw,35.6924853,139.7582533,"National Institute of Informatics, Japan",edu,4dd71a097e6b3cd379d8c802460667ee0cbc8463,citation,http://www.dgcv.nii.ac.jp/Publications/Papers/2015/BWILD2015.pdf,Real-time multi-view facial landmark detector learned by the structured output SVM,2015 +160,AFLW,aflw,33.856111,-5.574391,Moulay Ismail University,edu,1fd7a17a6c630a122c1a3d1c0668d14c0c375de0,citation,https://doi.org/10.1109/CIST.2016.7805097,"Facial landmark localization: Past, present and future",2016 +161,AFLW,aflw,38.88140235,121.52281098,Dalian University of Technology,edu,940e5c45511b63f609568dce2ad61437c5e39683,citation,https://doi.org/10.1109/TIP.2015.2390976,Fiducial Facial Point Extraction Using a Novel Projective Invariant,2015 +162,AFLW,aflw,37.4102193,-122.05965487,Carnegie Mellon University,edu,6dbdb07ce2991db0f64c785ad31196dfd4dae721,citation,https://arxiv.org/pdf/1802.09058.pdf,Seeing Small Faces from Robust Anchor's Perspective,2018 +163,AFLW,aflw,30.04287695,31.23664139,American University in Cairo,edu,1a12eec3ceb1c81cde4ae6e8f27aac08b36317d4,citation,https://arxiv.org/pdf/1706.09498.pdf,Real-time Distracted Driver Posture Classification,2017 +164,AFLW,aflw,51.6091578,-3.97934429,Swansea University,edu,cc70fb1ab585378c79a2ab94776723e597afe379,citation,https://doi.org/10.1109/ICIP.2017.8297067,Detect face in the wild using CNN cascade with feature aggregation at multi-resolution,2017 +165,AFLW,aflw,51.49887085,-0.17560797,Imperial College London,edu,59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Zafeiriou_The_Menpo_Facial_CVPR_2017_paper.pdf,The Menpo Facial Landmark Localisation Challenge: A Step Towards the Solution,2017 +166,AFLW,aflw,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,2f61d91033a06dd904ff9d1765d57e5b4d7f57a6,citation,https://doi.org/10.1109/ICIP.2016.7532953,FCFD: Teach the machine to accomplish face detection step by step,2016 +167,AFLW,aflw,40.47913175,-74.43168868,Rutgers University,edu,04ff69aa20da4eeccdabbe127e3641b8e6502ec0,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Sequential_Face_Alignment_CVPR_2016_paper.pdf,Sequential Face Alignment via Person-Specific Modeling in the Wild,2016 +168,AFLW,aflw,32.7283683,-97.11201835,University of Texas at Arlington,edu,04ff69aa20da4eeccdabbe127e3641b8e6502ec0,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Sequential_Face_Alignment_CVPR_2016_paper.pdf,Sequential Face Alignment via Person-Specific Modeling in the Wild,2016 
+169,AFLW,aflw,31.2284923,121.40211389,East China Normal University,edu,83295bce2340cb87901499cff492ae6ff3365475,citation,https://arxiv.org/pdf/1808.01558.pdf,Deep Multi-Center Learning for Face Alignment,2018 +170,AFLW,aflw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,83295bce2340cb87901499cff492ae6ff3365475,citation,https://arxiv.org/pdf/1808.01558.pdf,Deep Multi-Center Learning for Face Alignment,2018 +171,AFLW,aflw,46.0658836,11.1159894,University of Trento,edu,f201baf618574108bcee50e9a8b65f5174d832ee,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057,Viewpoint-Consistent 3D Face Alignment,2018 +172,AFLW,aflw,13.65450525,100.49423171,Robotics Institute,edu,f201baf618574108bcee50e9a8b65f5174d832ee,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057,Viewpoint-Consistent 3D Face Alignment,2018 +173,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,4c6233765b5f83333f6c675d3389bbbf503805e3,citation,https://perceptual.mpi-inf.mpg.de/files/2015/03/Yan_Vis13.pdf,Real-time high performance deformable model for face detection in the wild,2013 +174,AFLW,aflw,40.51865195,-74.44099801,State University of New Jersey,edu,02820c1491b10a1ff486fed32c269e4077c36551,citation,https://arxiv.org/pdf/1610.07930v1.pdf,Active user authentication for smartphones: A challenge data set and benchmark results,2016 +175,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,02820c1491b10a1ff486fed32c269e4077c36551,citation,https://arxiv.org/pdf/1610.07930v1.pdf,Active user authentication for smartphones: A challenge data set and benchmark results,2016 +176,AFLW,aflw,33.776033,-84.39884086,Georgia Institute of Technology,edu,e659221538d256b2c3e0724deff749eda903fc7d,citation,https://arxiv.org/pdf/1710.00925.pdf,Fine-Grained Head Pose Estimation Without Keypoints,2017 +177,AFLW,aflw,49.20172,16.6033168,Brno University of Technology,edu,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +178,AFLW,aflw,48.5670466,13.4517835,University of Passau,edu,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +179,AFLW,aflw,50.7171497,7.12825184,"Deutsche Welle, Bonn, Germany",edu,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +180,AFLW,aflw,44.6531692,10.8586228,"Expert Systems, Modena, Italy",company,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +181,AFLW,aflw,53.27639715,-9.05829961,National University of Ireland Galway,edu,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +182,AFLW,aflw,40.4402995,-3.7870076,"Paradigma Digital, Madrid, Spain",company,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +183,AFLW,aflw,53.3498053,-6.2603097,"Siren Solutions, Dublin, 
Ireland",company,b55e70df03d9b80c91446a97957bc95772dcc45b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,2018 +184,AFLW,aflw,39.86742125,32.73519072,Hacettepe University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +185,AFLW,aflw,39.87549675,32.78553506,Middle East Technical University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +186,AFLW,aflw,47.3764534,8.54770931,ETH Zürich,edu,961a5d5750f18e91e28a767b3cb234a77aac8305,citation,http://pdfs.semanticscholar.org/961a/5d5750f18e91e28a767b3cb234a77aac8305.pdf,Face Detection without Bells and Whistles,2014 +187,AFLW,aflw,40.51865195,-74.44099801,State University of New Jersey,edu,0d746111135c2e7f91443869003d05cde3044beb,citation,https://doi.org/10.1109/ICIP.2016.7532908,Partial face detection for continuous authentication,2016 +188,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,0d746111135c2e7f91443869003d05cde3044beb,citation,https://doi.org/10.1109/ICIP.2016.7532908,Partial face detection for continuous authentication,2016 +189,AFLW,aflw,34.0224149,-118.28634407,University of Southern California,edu,eb6ee56e085ebf473da990d032a4249437a3e462,citation,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf,Age/gender classification with whole-component convolutional neural networks (WC-CNN),2017 +190,AFLW,aflw,40.0044795,116.370238,Chinese Academy of Sciences,edu,2a4153655ad1169d482e22c468d67f3bc2c49f12,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf,Face Alignment Across Large Poses: A 3D Solution,2016 +191,AFLW,aflw,42.718568,-84.47791571,Michigan State University,edu,2a4153655ad1169d482e22c468d67f3bc2c49f12,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf,Face Alignment Across Large Poses: A 3D Solution,2016 +192,AFLW,aflw,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,ca8f23d9b9a40016eaf0467a3df46720ac718e1d,citation,https://doi.org/10.1109/ICASSP.2015.7178214,Face detection using Local Hybrid Patterns,2015 +193,AFLW,aflw,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +194,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +195,AFLW,aflw,-33.8809651,151.20107299,University of Technology Sydney,edu,bbf28f39e5038813afd74cf1bc78d55fcbe630f1,citation,https://arxiv.org/pdf/1803.04108.pdf,Style Aggregated Network for Facial Landmark Detection,2018 +196,AFLW,aflw,-33.95828745,18.45997349,University of Cape Town,edu,36e8ef2e5d52a78dddf0002e03918b101dcdb326,citation,http://www.milbo.org/stasm-files/multiview-active-shape-models-with-sift-for-300w.pdf,Multiview Active Shape Models with SIFT Descriptors for the 300-W Face Landmark Challenge,2013 +197,AFLW,aflw,42.7298459,-73.67950216,Rensselaer Polytechnic 
Institute,edu,1b794b944fd462a2742b6c2f8021fecc663004c9,citation,http://arxiv.org/abs/1709.05732,A Hierarchical Probabilistic Model for Facial Feature Detection,2014 +198,AFLW,aflw,40.47913175,-74.43168868,Rutgers University,edu,afdf9a3464c3b015f040982750f6b41c048706f5,citation,https://arxiv.org/pdf/1608.05477.pdf,A Recurrent Encoder-Decoder Network for Sequential Face Alignment,2016 +199,AFLW,aflw,50.3755269,-4.13937687,Plymouth University,edu,239958d6778643101ab631ec354ea1bc4d33e7e0,citation,http://doi.org/10.1016/j.patcog.2017.06.009,Head pose estimation in the wild using Convolutional Neural Networks and adaptive gradient methods,2017 +200,AFLW,aflw,39.2899685,-76.62196103,University of Maryland,edu,40c8cffd5aac68f59324733416b6b2959cb668fd,citation,http://arxiv.org/abs/1701.08341,Pooling Facial Segments to Face: The Shallow and Deep Ends,2017 +201,AFLW,aflw,-27.49741805,153.01316956,University of Queensland,edu,28646c6220848db46c6944967298d89a6559c700,citation,https://pdfs.semanticscholar.org/2864/6c6220848db46c6944967298d89a6559c700.pdf,It takes two to tango : Cascading off-the-shelf face detectors,2018 +202,AFLW,aflw,37.4102193,-122.05965487,Carnegie Mellon University,edu,48a9241edda07252c1aadca09875fabcfee32871,citation,https://arxiv.org/pdf/1611.08657v5.pdf,Convolutional Experts Constrained Local Model for Facial Landmark Detection,2017 +203,AFLW,aflw,52.2380139,6.8566761,University of Twente,edu,044d9a8c61383312cdafbcc44b9d00d650b21c70,citation,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf,300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge,2013 +204,AFLW,aflw,51.49887085,-0.17560797,Imperial College London,edu,044d9a8c61383312cdafbcc44b9d00d650b21c70,citation,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf,300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge,2013 +205,AFLW,aflw,53.22853665,-0.54873472,University of Lincoln,edu,044d9a8c61383312cdafbcc44b9d00d650b21c70,citation,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf,300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge,2013 +206,AFLW,aflw,52.9387428,-1.20029569,University of Nottingham,edu,4cd0da974af9356027a31b8485a34a24b57b8b90,citation,https://arxiv.org/pdf/1703.00862v2.pdf,Binarized Convolutional Landmark Localizers for Human Pose Estimation and Face Alignment with Limited Resources,2017 +207,AFLW,aflw,41.70456775,-86.23822026,University of Notre Dame,edu,17479e015a2dcf15d40190e06419a135b66da4e0,citation,https://arxiv.org/pdf/1610.08119.pdf,Predicting First Impressions With Deep Learning,2017 +208,AFLW,aflw,30.274084,120.15507,Alibaba,company,89497854eada7e32f06aa8f3c0ceedc0e91ecfef,citation,https://doi.org/10.1109/TIP.2017.2784571,Deep Context-Sensitive Facial Landmark Detection With Tree-Structured Modeling,2018 +209,AFLW,aflw,30.19331415,120.11930822,Zhejiang University,edu,89497854eada7e32f06aa8f3c0ceedc0e91ecfef,citation,https://doi.org/10.1109/TIP.2017.2784571,Deep Context-Sensitive Facial Landmark Detection With Tree-Structured Modeling,2018 +210,AFLW,aflw,32.77824165,34.99565673,Open University of Israel,edu,0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,citation,https://arxiv.org/pdf/1708.07517.pdf,FacePoseNet: Making a Case for Landmark-Free Face Alignment,2017 diff --git a/site/datasets/final/casia_webface.csv b/site/datasets/final/casia_webface.csv new file mode 100644 index 00000000..2cbffd5d --- /dev/null +++ 
b/site/datasets/final/casia_webface.csv @@ -0,0 +1,312 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,CASIA Webface,casia_webface,0.0,0.0,,,853bd61bc48a431b9b1c7cab10c603830c488e39,main,http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf,Learning Face Representation from Scratch,2014 +1,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,def2983576001bac7d6461d78451159800938112,citation,https://arxiv.org/pdf/1705.07426.pdf,The Do’s and Don’ts for CNN-Based Face Verification,2017 +2,CASIA Webface,casia_webface,38.5336349,-121.79077264,"University of California, Davis",edu,e94dfdc5581f6bc0338e21ad555b5f1734f8697e,citation,https://arxiv.org/pdf/1803.11556.pdf,Learning to Anonymize Faces for Privacy Preserving Action Detection,2018 +3,CASIA Webface,casia_webface,24.7925484,120.9951183,National Tsing Hua University,edu,68c3e61cefcfe4812df54be12625dabe66fb06a4,citation,https://pdfs.semanticscholar.org/68c3/e61cefcfe4812df54be12625dabe66fb06a4.pdf,A Compact Deep Learning Model for Robust Facial Expression Recognition,0 +4,CASIA Webface,casia_webface,23.0502042,113.39880323,South China University of Technology,edu,4bd3de97b256b96556d19a5db71dda519934fd53,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,2016 +5,CASIA Webface,casia_webface,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4bd3de97b256b96556d19a5db71dda519934fd53,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,2016 +6,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +7,CASIA Webface,casia_webface,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,citation,http://doi.org/10.1007/s11042-017-5571-3,Example image-based feature extraction for face recognition,2017 +8,CASIA Webface,casia_webface,37.2830003,127.04548469,Ajou University,edu,2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,citation,http://doi.org/10.1007/s11042-017-5571-3,Example image-based feature extraction for face recognition,2017 +9,CASIA Webface,casia_webface,46.0501558,14.46907327,University of Ljubljana,edu,368d59cf1733af511ed8abbcbeb4fb47afd4da1c,citation,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,2016 +10,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,368d59cf1733af511ed8abbcbeb4fb47afd4da1c,citation,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,2016 +11,CASIA Webface,casia_webface,32.1119889,34.80459702,Tel Aviv University,edu,63a6c256ec2cf2e0e0c9a43a085f5bc94af84265,citation,https://doi.org/10.1109/ICPR.2016.7899662,Complexity of multiverse networks and their multilayer generalization,2016 +12,CASIA Webface,casia_webface,42.3383668,-71.08793524,Northeastern University,edu,c9efcd8e32dced6efa2bba64789df8d0a8e4996a,citation,http://dl.acm.org/citation.cfm?id=2984060,Deep Convolutional Neural 
Network with Independent Softmax for Large Scale Face Recognition,2016 +13,CASIA Webface,casia_webface,31.846918,117.29053367,Hefei University of Technology,edu,f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,citation,https://doi.org/10.1109/TMM.2015.2476657,Understanding Blooming Human Groups in Social Networks,2015 +14,CASIA Webface,casia_webface,29.58333105,-98.61944505,University of Texas at San Antonio,edu,f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,citation,https://doi.org/10.1109/TMM.2015.2476657,Understanding Blooming Human Groups in Social Networks,2015 +15,CASIA Webface,casia_webface,1.2962018,103.77689944,National University of Singapore,edu,f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,citation,https://doi.org/10.1109/TMM.2015.2476657,Understanding Blooming Human Groups in Social Networks,2015 +16,CASIA Webface,casia_webface,48.8476037,2.2639934,"Université Paris-Saclay, France",edu,96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450,State-of-the-art face recognition performance using publicly available software and datasets,2018 +17,CASIA Webface,casia_webface,32.77824165,34.99565673,Open University of Israel,edu,870433ba89d8cab1656e57ac78f1c26f4998edfb,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163,Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network,2017 +18,CASIA Webface,casia_webface,51.5231607,-0.1282037,University College London,edu,c53352a4239568cc915ad968aff51c49924a3072,citation,http://pdfs.semanticscholar.org/c533/52a4239568cc915ad968aff51c49924a3072.pdf,Transfer Representation-Learning for Anomaly Detection,2016 +19,CASIA Webface,casia_webface,25.01682835,121.53846924,National Taiwan University,edu,17423fe480b109e1d924314c1dddb11b084e8a42,citation,https://pdfs.semanticscholar.org/1742/3fe480b109e1d924314c1dddb11b084e8a42.pdf,Deep Disguised Faces Recognition,0 +20,CASIA Webface,casia_webface,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +21,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +22,CASIA Webface,casia_webface,31.846918,117.29053367,Hefei University of Technology,edu,1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3,citation,http://doi.acm.org/10.1145/2964284.2984061,Robust Face Recognition with Deep Multi-View Representation Learning,2016 +23,CASIA Webface,casia_webface,1.2962018,103.77689944,National University of Singapore,edu,1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3,citation,http://doi.acm.org/10.1145/2964284.2984061,Robust Face Recognition with Deep Multi-View Representation Learning,2016 +24,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,73ea06787925157df519a15ee01cc3dc1982a7e0,citation,https://arxiv.org/pdf/1811.01474.pdf,Fast Face Image Synthesis with Minimal Training,2018 +25,CASIA Webface,casia_webface,22.53521465,113.9315911,Shenzhen University,edu,1d7df3df839a6aa8f5392310d46b2a89080a3c25,citation,https://arxiv.org/pdf/1612.02295.pdf,Large-Margin Softmax Loss for Convolutional Neural Networks,2016 +26,CASIA Webface,casia_webface,23.0502042,113.39880323,South China University of 
Technology,edu,1d7df3df839a6aa8f5392310d46b2a89080a3c25,citation,https://arxiv.org/pdf/1612.02295.pdf,Large-Margin Softmax Loss for Convolutional Neural Networks,2016 +27,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,edff76149ec44f6849d73f019ef9bded534a38c2,citation,https://arxiv.org/pdf/1704.02203.pdf,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,2017 +28,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,edff76149ec44f6849d73f019ef9bded534a38c2,citation,https://arxiv.org/pdf/1704.02203.pdf,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,2017 +29,CASIA Webface,casia_webface,35.9020448,139.93622009,University of Tokyo,edu,edff76149ec44f6849d73f019ef9bded534a38c2,citation,https://arxiv.org/pdf/1704.02203.pdf,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,2017 +30,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,citation,https://arxiv.org/pdf/1704.08063.pdf,SphereFace: Deep Hypersphere Embedding for Face Recognition,2017 +31,CASIA Webface,casia_webface,33.776033,-84.39884086,Georgia Institute of Technology,edu,bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,citation,https://arxiv.org/pdf/1704.08063.pdf,SphereFace: Deep Hypersphere Embedding for Face Recognition,2017 +32,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,citation,https://arxiv.org/pdf/1704.08063.pdf,SphereFace: Deep Hypersphere Embedding for Face Recognition,2017 +33,CASIA Webface,casia_webface,33.776033,-84.39884086,Georgia Institute of Technology,edu,9b2a272d4526b3eeeda0beb0d399074d5380a2b3,citation,https://arxiv.org/pdf/1808.01424.pdf,Learning to Align Images Using Weak Geometric Supervision,2018 +34,CASIA Webface,casia_webface,47.6423318,-122.1369302,Microsoft,company,9b2a272d4526b3eeeda0beb0d399074d5380a2b3,citation,https://arxiv.org/pdf/1808.01424.pdf,Learning to Align Images Using Weak Geometric Supervision,2018 +35,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,96e731e82b817c95d4ce48b9e6b08d2394937cf8,citation,http://arxiv.org/pdf/1508.01722v2.pdf,Unconstrained face verification using deep CNN features,2016 +36,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,96e731e82b817c95d4ce48b9e6b08d2394937cf8,citation,http://arxiv.org/pdf/1508.01722v2.pdf,Unconstrained face verification using deep CNN features,2016 +37,CASIA Webface,casia_webface,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,75858dbee2c248a60741fbc64dcad4f8b63d51cb,citation,https://doi.org/10.1109/TIP.2015.2460464,Markov Network-Based Unified Classifier for Face Recognition,2015 +38,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,a2b4a6c6b32900a066d0257ae6d4526db872afe2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466,Learning Face Image Quality From Human Assessments,2018 +39,CASIA Webface,casia_webface,32.1119889,34.80459702,Tel Aviv University,edu,7859667ed6c05a467dfc8a322ecd0f5e2337db56,citation,http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf,Web-Scale Transfer Learning for Unconstrained 1:N Face Identification,2015 +40,CASIA Webface,casia_webface,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,b8084d5e193633462e56f897f3d81b2832b72dff,citation,http://pdfs.semanticscholar.org/b808/4d5e193633462e56f897f3d81b2832b72dff.pdf,DeepID3: Face Recognition with Very Deep Neural Networks,2015 +41,CASIA Webface,casia_webface,22.42031295,114.20788644,Chinese University of Hong Kong,edu,b8084d5e193633462e56f897f3d81b2832b72dff,citation,http://pdfs.semanticscholar.org/b808/4d5e193633462e56f897f3d81b2832b72dff.pdf,DeepID3: Face Recognition with Very Deep Neural Networks,2015 +42,CASIA Webface,casia_webface,30.19331415,120.11930822,Zhejiang University,edu,969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce,citation,http://pdfs.semanticscholar.org/969f/d48e1a668ab5d3c6a80a3d2aeab77067c6ce.pdf,End-To-End Face Detection and Recognition,2017 +43,CASIA Webface,casia_webface,42.3889785,-72.5286987,University of Massachusetts,edu,368e99f669ea5fd395b3193cd75b301a76150f9d,citation,https://arxiv.org/pdf/1506.01342.pdf,One-to-many face recognition with bilinear CNNs,2016 +44,CASIA Webface,casia_webface,1.3484104,103.68297965,Nanyang Technological University,edu,b2470969e4fba92f7909eac26b77d08cc5575533,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8326475,Profit Maximization Mechanism and Data Management for Data Analytics Services,2018 +45,CASIA Webface,casia_webface,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,0c59071ddd33849bd431165bc2d21bbe165a81e0,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf,Person Recognition in Personal Photo Collections,2015 +46,CASIA Webface,casia_webface,51.7534538,-1.25400997,University of Oxford,edu,eb027969f9310e0ae941e2adee2d42cdf07d938c,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +47,CASIA Webface,casia_webface,25.2873992,110.3324277,Guilin University of Electronic Technology Guangxi Guilin,edu,9989ad33b64accea8042e386ff3f1216386ba7f1,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320,Facial feature extraction method based on shallow and deep fusion CNN,2017 +48,CASIA Webface,casia_webface,51.49887085,-0.17560797,Imperial College London,edu,809ea255d144cff780300440d0f22c96e98abd53,citation,http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf,ArcFace: Additive Angular Margin Loss for Deep Face Recognition,2018 +49,CASIA Webface,casia_webface,29.7207902,-95.34406271,University of Houston,edu,38d8ff137ff753f04689e6b76119a44588e143f3,citation,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf,When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition,2017 +50,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,fb1627ed224bf7b1e3d80c097316ed7703951df2,citation,https://doi.org/10.1109/VCIP.2017.8305094,Deep transfer network for face recognition using 3D synthesized face,2017 +51,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,7a65fc9e78eff3ab6062707deaadde024d2fad40,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf,A Study on Apparent Age Estimation,2015 +52,CASIA Webface,casia_webface,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,633101e794d7b80f55f466fd2941ea24595e10e6,citation,https://pdfs.semanticscholar.org/6331/01e794d7b80f55f466fd2941ea24595e10e6.pdf,Face Attribute Prediction with 
classification CNN,2016 +53,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,citation,https://doi.org/10.1109/TNNLS.2016.2522431,Learning Kernel Extended Dictionary for Face Recognition,2017 +54,CASIA Webface,casia_webface,39.329053,-76.619425,Johns Hopkins University,edu,4317856a1458baa427dc00e8ea505d2fc5f118ab,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296449,Regularizing face verification nets for pain intensity regression,2017 +55,CASIA Webface,casia_webface,29.7207902,-95.34406271,University of Houston,edu,7fcd03407c084023606c901e8933746b80d2ad57,citation,https://doi.org/10.1109/BTAS.2017.8272694,Local classifier chains for deep face recognition,2017 +56,CASIA Webface,casia_webface,39.329053,-76.619425,Johns Hopkins University,edu,92be73dffd3320fe7734258961fe5a5f2a43390e,citation,https://pdfs.semanticscholar.org/92be/73dffd3320fe7734258961fe5a5f2a43390e.pdf,Transferring Face Verification Nets To Pain and Expression Regression,2017 +57,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,92be73dffd3320fe7734258961fe5a5f2a43390e,citation,https://pdfs.semanticscholar.org/92be/73dffd3320fe7734258961fe5a5f2a43390e.pdf,Transferring Face Verification Nets To Pain and Expression Regression,2017 +58,CASIA Webface,casia_webface,50.7338124,7.1022465,Rheinische-Friedrich-Wilhelms University,edu,561ae67de137e75e9642ab3512d3749b34484310,citation,http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf,DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning,2018 +59,CASIA Webface,casia_webface,32.1119889,34.80459702,Tel Aviv University,edu,561ae67de137e75e9642ab3512d3749b34484310,citation,http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf,DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning,2018 +60,CASIA Webface,casia_webface,32.87935255,-117.23110049,"University of California, San Diego",edu,561ae67de137e75e9642ab3512d3749b34484310,citation,http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf,DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning,2018 +61,CASIA Webface,casia_webface,43.7776426,11.259765,University of Florence,edu,746c0205fdf191a737df7af000eaec9409ede73f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119,Investigating Nuisances in DCNN-Based Face Recognition,2018 +62,CASIA Webface,casia_webface,50.7791703,6.06728733,RWTH Aachen University,edu,c10b0a6ba98aa95d740a0d60e150ffd77c7895ad,citation,http://pdfs.semanticscholar.org/c10b/0a6ba98aa95d740a0d60e150ffd77c7895ad.pdf,Deep Fisher Faces,2017 +63,CASIA Webface,casia_webface,28.54632595,77.27325504,Indian Institute of Technology Delhi,edu,cbb27980eb04f68d9f10067d3d3c114efa9d0054,citation,https://arxiv.org/pdf/1807.03380.pdf,An Attention Model for Group-Level Emotion Recognition,2018 +64,CASIA Webface,casia_webface,39.9922379,116.30393816,Peking University,edu,8bf243817112ac0aa1348b40a065bb0b735cdb9c,citation,http://pdfs.semanticscholar.org/8bf2/43817112ac0aa1348b40a065bb0b735cdb9c.pdf,Learning a Repression Network for Precise Vehicle Search,2017 +65,CASIA Webface,casia_webface,51.7534538,-1.25400997,University of Oxford,edu,30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,citation,http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf,Template Adaptation for Face Verification and Identification,2017 +66,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese 
Academy of Sciences,edu,55fdff2881d43050a8c51c7fdc094dbfbbe6fa46,citation,https://doi.org/10.1109/ICB.2016.7550064,Transferring deep representation for NIR-VIS heterogeneous face recognition,2016 +67,CASIA Webface,casia_webface,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,6d07e176c754ac42773690d4b4919a39df85d7ec,citation,https://pdfs.semanticscholar.org/6d07/e176c754ac42773690d4b4919a39df85d7ec.pdf,Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks,2016 +68,CASIA Webface,casia_webface,52.2380139,6.8566761,University of Twente,edu,fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f,citation,http://pdfs.semanticscholar.org/fd9f/eb21b3d1fab470ff82e3f03efce6a0e67a1f.pdf,Deep Verification Learning,2016 +69,CASIA Webface,casia_webface,32.77824165,34.99565673,Open University of Israel,edu,1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,citation,http://arxiv.org/pdf/1607.01450v1.pdf,Pooling Faces: Template Based Face Recognition with Pooled Face Images,2016 +70,CASIA Webface,casia_webface,51.49887085,-0.17560797,Imperial College London,edu,8e0ab1b08964393e4f9f42ca037220fe98aad7ac,citation,https://arxiv.org/pdf/1712.04695.pdf,UV-GAN: Adversarial Facial UV Map Completion for Pose-invariant Face Recognition,2017 +71,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_AgeNet_Deeply_Learned_ICCV_2015_paper.pdf,AgeNet: Deeply Learned Regressor and Classifier for Robust Apparent Age Estimation,2015 +72,CASIA Webface,casia_webface,40.47913175,-74.43168868,Rutgers University,edu,76669f166ddd3fb830dbaacb3daa875cfedc24d9,citation,https://doi.org/10.1109/ICPR.2016.7899840,Learning face recognition from limited training data using deep neural networks,2016 +73,CASIA Webface,casia_webface,41.21002475,-73.80407056,IBM Thomas J. 
Watson Research Center,company,76669f166ddd3fb830dbaacb3daa875cfedc24d9,citation,https://doi.org/10.1109/ICPR.2016.7899840,Learning face recognition from limited training data using deep neural networks,2016 +74,CASIA Webface,casia_webface,37.5557271,127.0436642,Hanyang University,edu,946017d5f11aa582854ac4c0e0f1b18b06127ef1,citation,https://pdfs.semanticscholar.org/9460/17d5f11aa582854ac4c0e0f1b18b06127ef1.pdf,Tracking Persons-of-Interest via Adaptive Discriminative Features,2016 +75,CASIA Webface,casia_webface,37.36566745,-120.42158888,"University of California, Merced",edu,946017d5f11aa582854ac4c0e0f1b18b06127ef1,citation,https://pdfs.semanticscholar.org/9460/17d5f11aa582854ac4c0e0f1b18b06127ef1.pdf,Tracking Persons-of-Interest via Adaptive Discriminative Features,2016 +76,CASIA Webface,casia_webface,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,946017d5f11aa582854ac4c0e0f1b18b06127ef1,citation,https://pdfs.semanticscholar.org/9460/17d5f11aa582854ac4c0e0f1b18b06127ef1.pdf,Tracking Persons-of-Interest via Adaptive Discriminative Features,2016 +77,CASIA Webface,casia_webface,47.6423318,-122.1369302,Microsoft,company,291265db88023e92bb8c8e6390438e5da148e8f5,citation,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,2016 +78,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,citation,https://doi.org/10.1109/GlobalSIP.2016.7906030,An analysis of the robustness of deep face recognition networks to noisy training labels,2016 +79,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,citation,https://doi.org/10.1109/GlobalSIP.2016.7906030,An analysis of the robustness of deep face recognition networks to noisy training labels,2016 +80,CASIA Webface,casia_webface,39.9808333,116.34101249,Beihang University,edu,a961f1234e963a7945fed70197015678149b37d8,citation,http://dl.acm.org/citation.cfm?id=3206068,Facial Expression Synthesis by U-Net Conditional Generative Adversarial Networks,2018 +81,CASIA Webface,casia_webface,29.82366295,106.42050016,Southwest University,edu,11a47a91471f40af5cf00449954474fd6e9f7694,citation,http://pdfs.semanticscholar.org/11a4/7a91471f40af5cf00449954474fd6e9f7694.pdf,NIRFaceNet: A Convolutional Neural Network for Near-Infrared Face Identification,2016 +82,CASIA Webface,casia_webface,25.2873992,110.3324277,Guilin University of Electronic Technology Guangxi Guilin,edu,ef2bb8bd93fa8b44414565b32735334fa6823b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393076,An accurate and efficient face recognition method based on hash coding,2017 +83,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,citation,https://pdfs.semanticscholar.org/23dd/8d17ce09c22d367e4d62c1ccf507bcbc64da.pdf,Deep Density Clustering of Unconstrained Faces ( Supplementary Material ),2018 +84,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,3dfb822e16328e0f98a47209d7ecd242e4211f82,citation,https://arxiv.org/pdf/1708.08197.pdf,Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments,2017 +85,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of 
Sciences,edu,64b9ad39d115f3e375bde4f70fb8fdef5d681df8,citation,https://doi.org/10.1109/ICB.2016.7550088,Bootstrapping Joint Bayesian model for robust face verification,2016 +86,CASIA Webface,casia_webface,32.77824165,34.99565673,Open University of Israel,edu,c75e6ce54caf17b2780b4b53f8d29086b391e839,citation,https://arxiv.org/pdf/1802.00542.pdf,"ExpNet: Landmark-Free, Deep, 3D Facial Expressions",2018 +87,CASIA Webface,casia_webface,22.53521465,113.9315911,Shenzhen University,edu,66dcd855a6772d2731b45cfdd75f084327b055c2,citation,http://pdfs.semanticscholar.org/66dc/d855a6772d2731b45cfdd75f084327b055c2.pdf,Quality Classified Image Analysis with Application to Face Detection and Recognition,2018 +88,CASIA Webface,casia_webface,42.3383668,-71.08793524,Northeastern University,edu,e00d4e4ba25fff3583b180db078ef962bf7d6824,citation,http://pdfs.semanticscholar.org/e00d/4e4ba25fff3583b180db078ef962bf7d6824.pdf,Face Verification with Multi-Task and Multi-Scale Features Fusion,2017 +89,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,831d661d657d97a07894da8639a048c430c5536d,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.19,Weakly Supervised Facial Analysis with Dense Hyper-Column Features,2016 +90,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,citation,https://doi.org/10.1109/ICPR.2016.7900278,Regularized metric adaptation for unconstrained face verification,2016 +91,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,e9c008d31da38d9eef67a28d2c77cb7daec941fb,citation,https://arxiv.org/pdf/1708.03769.pdf,Noisy Softmax: Improving the Generalization Ability of DCNN via Postponing the Early Softmax Saturation,2017 +92,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,c675534be881e59a78a5986b8fb4e649ddd2abbe,citation,https://doi.org/10.1109/ICIP.2017.8296548,Face recognition by landmark pooling-based CNN with concentrate loss,2017 +93,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,ea03a569272d329090fe60d6bff8d119e18057d7,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532906,Fisher vector encoded deep convolutional features for unconstrained face verification,2016 +94,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,ea03a569272d329090fe60d6bff8d119e18057d7,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532906,Fisher vector encoded deep convolutional features for unconstrained face verification,2016 +95,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,a73405038fdc0d8bf986539ef755a80ebd341e97,citation,https://doi.org/10.1109/TIP.2017.2698918,Conditional High-Order Boltzmann Machines for Supervised Relation Learning,2017 +96,CASIA Webface,casia_webface,36.20304395,117.05842113,Tianjin University,edu,5180df9d5eb26283fb737f491623395304d57497,citation,https://arxiv.org/pdf/1804.10899.pdf,Scalable Angular Discriminative Deep Metric Learning for Face Recognition,2018 +97,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,3ac3a714042d3ebc159546c26321a1f8f4f5f80c,citation,http://dl.acm.org/citation.cfm?id=3025149,Clustering lightened deep representation for large scale face identification,2017 +98,CASIA Webface,casia_webface,37.26728,126.9841151,Seoul National 
University,edu,282503fa0285240ef42b5b4c74ae0590fe169211,citation,http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf,Feeding Hand-Crafted Features for Enhancing the Performance of Convolutional Neural Networks,2018 +99,CASIA Webface,casia_webface,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,29db046dd1f8100b279c3f5f5c5ef19bdbf5af9a,citation,https://arxiv.org/pdf/1706.04717.pdf,Recent Progress of Face Image Synthesis,2017 +100,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,citation,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf,Deep Density Clustering of Unconstrained Faces,0 +101,CASIA Webface,casia_webface,30.40550035,-91.18620474,Louisiana State University,edu,9f65319b8a33c8ec11da2f034731d928bf92e29d,citation,http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf,Taking Roll: a Pipeline for Face Recognition,2018 +102,CASIA Webface,casia_webface,41.10427915,29.02231159,Istanbul Technical University,edu,d3d5d86afec84c0713ec868cf5ed41661fc96edc,citation,https://arxiv.org/pdf/1606.02894.pdf,A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition,2016 +103,CASIA Webface,casia_webface,40.8927159,29.37863323,Sabanci University,edu,d3d5d86afec84c0713ec868cf5ed41661fc96edc,citation,https://arxiv.org/pdf/1606.02894.pdf,A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition,2016 +104,CASIA Webface,casia_webface,31.76909325,117.17795091,Anhui University,edu,b910590a0eb191d03e1aedb3d55c905129e92e6b,citation,http://doi.acm.org/10.1145/2808492.2808570,Robust gender classification on unconstrained face images,2015 +105,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,b910590a0eb191d03e1aedb3d55c905129e92e6b,citation,http://doi.acm.org/10.1145/2808492.2808570,Robust gender classification on unconstrained face images,2015 +106,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0,citation,http://doi.acm.org/10.1145/2911996.2911999,Homemade TS-Net for Automatic Face Recognition,2016 +107,CASIA Webface,casia_webface,39.86742125,32.73519072,Hacettepe University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +108,CASIA Webface,casia_webface,39.87549675,32.78553506,Middle East Technical University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +109,CASIA Webface,casia_webface,46.0501558,14.46907327,University of Ljubljana,edu,69adbfa7b0b886caac15ebe53b89adce390598a3,citation,https://arxiv.org/pdf/1805.10938.pdf,Face hallucination using cascaded super-resolution and identity priors,2018 +110,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,69adbfa7b0b886caac15ebe53b89adce390598a3,citation,https://arxiv.org/pdf/1805.10938.pdf,Face hallucination using cascaded super-resolution and identity priors,2018 +111,CASIA Webface,casia_webface,42.3383668,-71.08793524,Northeastern University,edu,feea73095b1be0cbae1ad7af8ba2c4fb6f316d35,citation,http://dl.acm.org/citation.cfm?id=3126693,Deep Face Recognition with Center Invariant Loss,2017 +112,CASIA Webface,casia_webface,35.9990522,-78.9290629,Duke 
University,edu,3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.720,Not Afraid of the Dark: NIR-VIS Face Recognition via Cross-Spectral Hallucination and Low-Rank Embedding,2017 +113,CASIA Webface,casia_webface,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,91a4ebf1ca0314a74c436729700ef09bddaa6222,citation,https://arxiv.org/pdf/1808.01338.pdf,Detailed Human Avatars from Monocular Video,2018 +114,CASIA Webface,casia_webface,47.5612651,7.5752961,University of Basel,edu,0081e2188c8f34fcea3e23c49fb3e17883b33551,citation,http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf,Training Deep Face Recognition Systems with Synthetic Data,2018 +115,CASIA Webface,casia_webface,24.4399419,118.09301781,Xiamen University,edu,57ba4b6de23a6fc9d45ff052ed2563e5de00b968,citation,https://doi.org/10.1109/ICIP.2017.8296993,An efficient deep neural networks training framework for robust face recognition,2017 +116,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,citation,https://doi.org/10.1109/ICIP.2017.8296394,Weakly supervised multiscale-inception learning for web-scale face recognition,2017 +117,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,0be418e63d111e3b94813875f75909e4dc27d13a,citation,https://doi.org/10.1109/ICB.2016.7550057,Fine-grained LFW database,2016 +118,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +119,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +120,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,450c6a57f19f5aa45626bb08d7d5d6acdb863b4b,citation,https://arxiv.org/pdf/1805.00611.pdf,Towards Interpretable Face Recognition,2018 +121,CASIA Webface,casia_webface,30.2931534,120.1620458,Zhejiang University of Technology,edu,cb9921d5fc4ffa50be537332e111f03d74622442,citation,https://doi.org/10.1007/978-3-319-46654-5_79,Face Occlusion Detection Using Cascaded Convolutional Neural Network,2016 +122,CASIA Webface,casia_webface,29.6328784,-82.3490133,University of Florida,edu,291de30ceecb5dcf0644c35e2b5935d341ea148b,citation,https://arxiv.org/pdf/1810.00024.pdf,Explainable Black-Box Attacks Against Model-based Authentication,2018 +123,CASIA Webface,casia_webface,42.3383668,-71.08793524,Northeastern University,edu,3f540faf85e1f8de6ce04fb37e556700b67e4ad3,citation,http://pdfs.semanticscholar.org/3f54/0faf85e1f8de6ce04fb37e556700b67e4ad3.pdf,Face Verification with Multi-Task and Multi-Scale Feature Fusion,2017 +124,CASIA Webface,casia_webface,29.7207902,-95.34406271,University of Houston,edu,8334da483f1986aea87b62028672836cb3dc6205,citation,https://arxiv.org/pdf/1805.06306.pdf,Fully Associative Patch-Based 1-to-N Matcher for Face Recognition,2018 +125,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of 
Maryland,edu,0019925779bff96448f0c75492717e4473f88377,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w3/papers/Reale_Deep_Heterogeneous_Face_CVPR_2017_paper.pdf,Deep Heterogeneous Face Recognition Networks Based on Cross-Modal Distillation and an Equitable Distance Metric,2017 +126,CASIA Webface,casia_webface,45.7413921,126.62552755,Harbin Institute of Technology,edu,05455f5e3c3989be4991cb74b73cdfd0d6522622,citation,https://arxiv.org/pdf/1804.04829.pdf,Learning Warped Guidance for Blind Face Restoration,2018 +127,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,05455f5e3c3989be4991cb74b73cdfd0d6522622,citation,https://arxiv.org/pdf/1804.04829.pdf,Learning Warped Guidance for Blind Face Restoration,2018 +128,CASIA Webface,casia_webface,38.0333742,-84.5017758,University of Kentucky,edu,05455f5e3c3989be4991cb74b73cdfd0d6522622,citation,https://arxiv.org/pdf/1804.04829.pdf,Learning Warped Guidance for Blind Face Restoration,2018 +129,CASIA Webface,casia_webface,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +130,CASIA Webface,casia_webface,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +131,CASIA Webface,casia_webface,25.01682835,121.53846924,National Taiwan University,edu,81884e1de00e59f24bc20254584d73a1a1806933,citation,https://arxiv.org/pdf/1811.02328.pdf,Super-Identity Convolutional Neural Network for Face Hallucination,2018 +132,CASIA Webface,casia_webface,39.993008,116.329882,SenseTime,company,81884e1de00e59f24bc20254584d73a1a1806933,citation,https://arxiv.org/pdf/1811.02328.pdf,Super-Identity Convolutional Neural Network for Face Hallucination,2018 +133,CASIA Webface,casia_webface,30.284151,-97.73195598,University of Texas at Austin,edu,81884e1de00e59f24bc20254584d73a1a1806933,citation,https://arxiv.org/pdf/1811.02328.pdf,Super-Identity Convolutional Neural Network for Face Hallucination,2018 +134,CASIA Webface,casia_webface,47.6543238,-122.30800894,University of Washington,edu,405526dfc79de98f5bf3c97bf4aa9a287700f15d,citation,http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf,MegaFace: A Million Faces for Recognition at Scale,2015 +135,CASIA Webface,casia_webface,32.77824165,34.99565673,Open University of Israel,edu,582edc19f2b1ab2ac6883426f147196c8306685a,citation,http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf,Do We Really Need to Collect Millions of Faces for Effective Face Recognition?,2016 +136,CASIA Webface,casia_webface,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,076d3fc800d882445c11b9af466c3af7d2afc64f,citation,http://slsp.kaist.ac.kr/paperdata/Face_attribute_classification.pdf,Face attribute classification using attribute-aware correlation map and gated convolutional neural networks,2015 +137,CASIA Webface,casia_webface,31.83907195,117.26420748,University of Science and Technology of China,edu,e1256ff535bf4c024dd62faeb2418d48674ddfa2,citation,https://arxiv.org/pdf/1803.11182.pdf,Towards Open-Set Identity Preserving Face Synthesis,2018 +138,CASIA 
Webface,casia_webface,36.383765,127.36694,"Electronics and Telecommunications Research Institute, Daejeon, Korea",edu,77c5437107f8138d48cb7e10b2b286fa51473678,citation,https://doi.org/10.1109/URAI.2016.7734005,A pseudo ensemble convolutional neural networks,2016 +139,CASIA Webface,casia_webface,36.3851395,127.3683413,"University of Science and Technology, Korea",edu,77c5437107f8138d48cb7e10b2b286fa51473678,citation,https://doi.org/10.1109/URAI.2016.7734005,A pseudo ensemble convolutional neural networks,2016 +140,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,93eb3963bc20e28af26c53ef3bce1e76b15e3209,citation,https://doi.org/10.1109/ICIP.2017.8296992,Occlusion robust face recognition based on mask learning,2017 +141,CASIA Webface,casia_webface,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,16b9d258547f1eccdb32111c9f45e2e4bbee79af,citation,https://arxiv.org/pdf/1704.06369.pdf,NormFace: L2 Hypersphere Embedding for Face Verification,2017 +142,CASIA Webface,casia_webface,39.94976005,116.33629046,Beijing Jiaotong University,edu,7e2cfbfd43045fbd6aabd9a45090a5716fc4e179,citation,https://arxiv.org/pdf/1808.00435.pdf,Global Norm-Aware Pooling for Pose-Robust Face Recognition at Low False Positive Rate,2018 +143,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,99facca6fc50cc30f13b7b6dd49ace24bc94f702,citation,https://arxiv.org/pdf/1609.03892.pdf,VIPLFaceNet: an open source deep face recognition SDK,2016 +144,CASIA Webface,casia_webface,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,99facca6fc50cc30f13b7b6dd49ace24bc94f702,citation,https://arxiv.org/pdf/1609.03892.pdf,VIPLFaceNet: an open source deep face recognition SDK,2016 +145,CASIA Webface,casia_webface,25.01682835,121.53846924,National Taiwan University,edu,b50edfea790f86373407a964b4255bf8e436d377,citation,http://doi.acm.org/10.1145/3136755.3143008,Group emotion recognition with individual facial emotion CNNs and global image based CNNs,2017 +146,CASIA Webface,casia_webface,1.2962018,103.77689944,National University of Singapore,edu,c17c7b201cfd0bcd75441afeaa734544c6ca3416,citation,https://doi.org/10.1109/TCSVT.2016.2587389,Layerwise Class-Aware Convolutional Neural Network,2017 +147,CASIA Webface,casia_webface,32.0575279,118.78682252,Southeast University,edu,c17c7b201cfd0bcd75441afeaa734544c6ca3416,citation,https://doi.org/10.1109/TCSVT.2016.2587389,Layerwise Class-Aware Convolutional Neural Network,2017 +148,CASIA Webface,casia_webface,22.3386304,114.2620337,Hong Kong University of Science and Technology,edu,585260468d023ffc95f0e539c3fa87254c28510b,citation,http://pdfs.semanticscholar.org/5852/60468d023ffc95f0e539c3fa87254c28510b.pdf,Cardea: Context-Aware Visual Privacy Protection from Pervasive Cameras,2016 +149,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,3b9b200e76a35178da940279d566bbb7dfebb787,citation,http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf,Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition,2017 +150,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,8bfada57140aa1aa22a575e960c2a71140083293,citation,http://pdfs.semanticscholar.org/8bfa/da57140aa1aa22a575e960c2a71140083293.pdf,Can we match Ultraviolet Face Images against their Visible Counterparts?,2015 +151,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of 
Sciences,edu,72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,citation,https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf,Face Recognition with Contrastive Convolution,2018 +152,CASIA Webface,casia_webface,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,citation,https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf,Face Recognition with Contrastive Convolution,2018 +153,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,44b827df6c433ca49bcf44f9f3ebfdc0774ee952,citation,https://doi.org/10.1109/LSP.2017.2726105,Deep Correlation Feature Learning for Face Verification in the Wild,2017 +154,CASIA Webface,casia_webface,22.42031295,114.20788644,Chinese University of Hong Kong,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +155,CASIA Webface,casia_webface,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +156,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,b6f758be954d34817d4ebaa22b30c63a4b8ddb35,citation,http://arxiv.org/abs/1703.04835,A Proximity-Aware Hierarchical Clustering of Faces,2017 +157,CASIA Webface,casia_webface,28.2290209,112.99483204,"National University of Defense Technology, China",edu,511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,citation,https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf,A Community Detection Approach to Cleaning Extremely Large Face Database,2018 +158,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cdef0eaff4a3c168290d238999fc066ebc3a93e8,citation,https://arxiv.org/pdf/1707.07391.pdf,Contrastive-center loss for deep neural networks,2017 +159,CASIA Webface,casia_webface,30.2810654,120.02139087,"Alibaba Group, Hangzhou, China",edu,1e62ca5845a6f0492574a5da049e9b43dbeadb1b,citation,https://doi.org/10.1109/LSP.2016.2637400,Cross-Modality Face Recognition via Heterogeneous Joint Bayesian,2017 +160,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,1e62ca5845a6f0492574a5da049e9b43dbeadb1b,citation,https://doi.org/10.1109/LSP.2016.2637400,Cross-Modality Face Recognition via Heterogeneous Joint Bayesian,2017 +161,CASIA Webface,casia_webface,47.6543238,-122.30800894,University of Washington,edu,96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale,2016 +162,CASIA Webface,casia_webface,40.9153196,-73.1270626,Stony Brook University,edu,40883844c1ceab95cb92498a92bfdf45beaa288e,citation,https://arxiv.org/pdf/1709.02848.pdf,Improving Heterogeneous Face Recognition with Conditional Adversarial Networks,2017 +163,CASIA Webface,casia_webface,51.7534538,-1.25400997,University of Oxford,edu,8ec82da82416bb8da8cdf2140c740e1574eaf84f,citation,http://pdfs.semanticscholar.org/8ec8/2da82416bb8da8cdf2140c740e1574eaf84f.pdf,Lip Reading in Profile,2017 +164,CASIA Webface,casia_webface,35.9042272,-78.85565763,"IBM Research, North 
Carolina",company,61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa,citation,https://arxiv.org/pdf/1809.01604.pdf,Merging datasets through deep learning,2018 +165,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,a8748a79e8d37e395354ba7a8b3038468cb37e1f,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.47,Seeing the Forest from the Trees: A Holistic Approach to Near-Infrared Heterogeneous Face Recognition,2016 +166,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,a8748a79e8d37e395354ba7a8b3038468cb37e1f,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.47,Seeing the Forest from the Trees: A Holistic Approach to Near-Infrared Heterogeneous Face Recognition,2016 +167,CASIA Webface,casia_webface,37.3936717,-122.0807262,Facebook,company,628a3f027b7646f398c68a680add48c7969ab1d9,citation,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition,2017 +168,CASIA Webface,casia_webface,38.88140235,121.52281098,Dalian University of Technology,edu,052f994898c79529955917f3dfc5181586282cf8,citation,https://arxiv.org/pdf/1708.02191.pdf,Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos,2017 +169,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +170,CASIA Webface,casia_webface,37.43131385,-122.16936535,Stanford University,edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +171,CASIA Webface,casia_webface,32.87935255,-117.23110049,"University of California, San Diego",edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +172,CASIA Webface,casia_webface,23.0502042,113.39880323,South China University of Technology,edu,6880013eb0b91a2b334e0be0dced0a1a79943469,citation,https://arxiv.org/pdf/1810.11809.pdf,Discrimination-aware Channel Pruning for Deep Neural Networks,2018 +173,CASIA Webface,casia_webface,32.7283683,-97.11201835,University of Texas at Arlington,edu,6880013eb0b91a2b334e0be0dced0a1a79943469,citation,https://arxiv.org/pdf/1810.11809.pdf,Discrimination-aware Channel Pruning for Deep Neural Networks,2018 +174,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,9fb93b7c2bae866608f26c4254e5bd69cc5031d6,citation,https://arxiv.org/pdf/1809.08999.pdf,Fast Geometrically-Perturbed Adversarial Faces,2018 +175,CASIA Webface,casia_webface,32.1638824,34.8115862,FDNA Israel,company,92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d,citation,https://doi.org/10.1109/ICIP.2016.7533062,Quality scores for deep regression systems,2016 +176,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,2d748f8ee023a5b1fbd50294d176981ded4ad4ee,citation,http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf,Triplet Similarity Embedding for Face Verification,2016 +177,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,5495e224ac7b45b9edc5cfeabbb754d8a40a879b,citation,http://pdfs.semanticscholar.org/5495/e224ac7b45b9edc5cfeabbb754d8a40a879b.pdf,Feature Reconstruction Disentangling 
for Pose-invariant Face Recognition Supplementary Material,2017 +178,CASIA Webface,casia_webface,32.87935255,-117.23110049,"University of California, San Diego",edu,5495e224ac7b45b9edc5cfeabbb754d8a40a879b,citation,http://pdfs.semanticscholar.org/5495/e224ac7b45b9edc5cfeabbb754d8a40a879b.pdf,Feature Reconstruction Disentangling for Pose-invariant Face Recognition Supplementary Material,2017 +179,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +180,CASIA Webface,casia_webface,38.95187,-77.363259,"Noblis, Falls Church, VA, U.S.A.",company,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +181,CASIA Webface,casia_webface,22.2081469,114.25964115,University of Hong Kong,edu,7ffef9f26c39377ee937d29b8990580266a7a8a5,citation,https://arxiv.org/pdf/1810.06951.pdf,Deep Metric Learning with Hierarchical Triplet Loss,2018 +182,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,565f7c767e6b150ebda491e04e6b1de759fda2d4,citation,https://doi.org/10.1016/j.patcog.2016.11.023,"Fine-grained face verification: FGLFW database, baselines, and human-DCMN partnership",2017 +183,CASIA Webface,casia_webface,1.3484104,103.68297965,Nanyang Technological University,edu,0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f,citation,https://doi.org/10.1109/LSP.2018.2810121,Deep Coupled ResNet for Low-Resolution Face Recognition,2018 +184,CASIA Webface,casia_webface,31.30104395,121.50045497,Fudan University,edu,862d17895fe822f7111e737cbcdd042ba04377e8,citation,http://pdfs.semanticscholar.org/862d/17895fe822f7111e737cbcdd042ba04377e8.pdf,Semi-Latent GAN: Learning to generate and modify facial images from attributes,2017 +185,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,4b605e6a9362485bfe69950432fa1f896e7d19bf,citation,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf,A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets,2016 +186,CASIA Webface,casia_webface,39.9586652,116.30971281,Beijing Institute of Technology,edu,14d72dc9f78d65534c68c3ed57305f14bd4b5753,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yan_Exploiting_Multi-Grain_Ranking_ICCV_2017_paper.pdf,Exploiting Multi-grain Ranking Constraints for Precisely Searching Visually-similar Vehicles,2017 +187,CASIA Webface,casia_webface,39.9922379,116.30393816,Peking University,edu,14d72dc9f78d65534c68c3ed57305f14bd4b5753,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yan_Exploiting_Multi-Grain_Ranking_ICCV_2017_paper.pdf,Exploiting Multi-grain Ranking Constraints for Precisely Searching Visually-similar Vehicles,2017 +188,CASIA Webface,casia_webface,35.0274996,135.78154513,University of Caen,edu,0ad8149318912b5449085187eb3521786a37bc78,citation,http://arxiv.org/abs/1604.02975,CP-mtML: Coupled Projection Multi-Task Metric Learning for Large Scale Face Retrieval,2016 +189,CASIA Webface,casia_webface,1.3484104,103.68297965,Nanyang Technological University,edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +190,CASIA 
Webface,casia_webface,32.87935255,-117.23110049,"University of California, San Diego",edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +191,CASIA Webface,casia_webface,31.30104395,121.50045497,Fudan University,edu,c5e37630d0672e4d44f7dee83ac2c1528be41c2e,citation,http://dl.acm.org/citation.cfm?id=3078973,Multi-task Deep Neural Network for Joint Face Recognition and Facial Attribute Prediction,2017 +192,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,3661a34f302883c759b9fa2ce03de0c7173d2bb2,citation,http://pdfs.semanticscholar.org/fd6d/14fb0bbca58e924c504d7dc57cb7f8d3707e.pdf,Peak-Piloted Deep Network for Facial Expression Recognition,2016 +193,CASIA Webface,casia_webface,1.2962018,103.77689944,National University of Singapore,edu,3661a34f302883c759b9fa2ce03de0c7173d2bb2,citation,http://pdfs.semanticscholar.org/fd6d/14fb0bbca58e924c504d7dc57cb7f8d3707e.pdf,Peak-Piloted Deep Network for Facial Expression Recognition,2016 +194,CASIA Webface,casia_webface,39.329053,-76.619425,Johns Hopkins University,edu,2594a77a3f0dd5073f79ba620e2f287804cec630,citation,https://arxiv.org/pdf/1702.06925v1.pdf,Regularizing face verification nets for pain intensity regression,2017 +195,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,2594a77a3f0dd5073f79ba620e2f287804cec630,citation,https://arxiv.org/pdf/1702.06925v1.pdf,Regularizing face verification nets for pain intensity regression,2017 +196,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,987a649cb33302c41412419f8eeb77048aa5513e,citation,https://arxiv.org/pdf/1803.07140.pdf,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,2018 +197,CASIA Webface,casia_webface,42.36782045,-71.12666653,Harvard University,edu,987a649cb33302c41412419f8eeb77048aa5513e,citation,https://arxiv.org/pdf/1803.07140.pdf,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,2018 +198,CASIA Webface,casia_webface,43.614386,7.071125,EURECOM,edu,70569810e46f476515fce80a602a210f8d9a2b95,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.105,Apparent Age Estimation from Face Images Combining General and Children-Specialized Deep Learning Models,2016 +199,CASIA Webface,casia_webface,33.776033,-84.39884086,Georgia Institute of Technology,edu,93af36da08bf99e68c9b0d36e141ed8154455ac2,citation,https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf,A Dditive M Argin S Oftmax for F Ace V Erification,2018 +200,CASIA Webface,casia_webface,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,93af36da08bf99e68c9b0d36e141ed8154455ac2,citation,https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf,A Dditive M Argin S Oftmax for F Ace V Erification,2018 +201,CASIA Webface,casia_webface,31.30104395,121.50045497,Fudan University,edu,5a259f2f5337435f841d39dada832ab24e7b3325,citation,http://doi.acm.org/10.1145/2964284.2984059,Face Recognition via Active Annotation and Learning,2016 +202,CASIA Webface,casia_webface,40.0044795,116.370238,Chinese Academy of Sciences,edu,5a259f2f5337435f841d39dada832ab24e7b3325,citation,http://doi.acm.org/10.1145/2964284.2984059,Face Recognition via Active Annotation and Learning,2016 +203,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of 
Maryland,edu,100105d6c97b23059f7aa70589ead2f61969fbc3,citation,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477558,Frontal to profile face verification in the wild,2016 +204,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,100105d6c97b23059f7aa70589ead2f61969fbc3,citation,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477558,Frontal to profile face verification in the wild,2016 +205,CASIA Webface,casia_webface,22.46221665,91.96942263,Chittagong University of Engineering and Technology,edu,eed93d2e16b55142b3260d268c9e72099c53d5bc,citation,https://arxiv.org/pdf/1801.01262.pdf,ICFVR 2017: 3rd international competition on finger vein recognition,2017 +206,CASIA Webface,casia_webface,39.9922379,116.30393816,Peking University,edu,eed93d2e16b55142b3260d268c9e72099c53d5bc,citation,https://arxiv.org/pdf/1801.01262.pdf,ICFVR 2017: 3rd international competition on finger vein recognition,2017 +207,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,eb8519cec0d7a781923f68fdca0891713cb81163,citation,https://arxiv.org/pdf/1703.08617.pdf,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,2017 +208,CASIA Webface,casia_webface,45.57022705,-122.63709346,Concordia University,edu,eb8519cec0d7a781923f68fdca0891713cb81163,citation,https://arxiv.org/pdf/1703.08617.pdf,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,2017 +209,CASIA Webface,casia_webface,30.642769,104.06751175,"Sichuan University, Chengdu",edu,8d955b025495522e67e8cb6e29436001ebbd0abb,citation,https://arxiv.org/pdf/1803.11366.pdf,Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition,2018 +210,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,8d955b025495522e67e8cb6e29436001ebbd0abb,citation,https://arxiv.org/pdf/1803.11366.pdf,Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition,2018 +211,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,c92e36689ef561df726a7ae861d9c166c3934908,citation,https://doi.org/10.1109/ICPR.2016.7900140,Face hallucination by deep traversal network,2016 +212,CASIA Webface,casia_webface,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2296d79753118cfcd0fecefece301557f4cb66e2,citation,https://arxiv.org/pdf/1804.03487.pdf,Exploring Disentangled Feature Representation Beyond Face Identification,2018 +213,CASIA Webface,casia_webface,39.993008,116.329882,SenseTime,company,2296d79753118cfcd0fecefece301557f4cb66e2,citation,https://arxiv.org/pdf/1804.03487.pdf,Exploring Disentangled Feature Representation Beyond Face Identification,2018 +214,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,02820c1491b10a1ff486fed32c269e4077c36551,citation,https://arxiv.org/pdf/1610.07930v1.pdf,Active user authentication for smartphones: A challenge data set and benchmark results,2016 +215,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,02820c1491b10a1ff486fed32c269e4077c36551,citation,https://arxiv.org/pdf/1610.07930v1.pdf,Active user authentication for smartphones: A challenge data set and benchmark results,2016 +216,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,486c9a0e5eb1e0bf107c31c2bf9689b25e18383b,citation,https://arxiv.org/pdf/1804.08790.pdf,Face Recognition: Primates in the Wild,2018 +217,CASIA 
Webface,casia_webface,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,d4f1eb008eb80595bcfdac368e23ae9754e1e745,citation,https://arxiv.org/pdf/1708.02337.pdf,Unconstrained Face Detection and Open-Set Face Recognition Challenge,2017 +218,CASIA Webface,casia_webface,40.8419836,-73.94368971,Columbia University,edu,61f93ed515b3bfac822deed348d9e21d5dffe373,citation,http://dvmmweb.cs.columbia.edu/files/set_hash_wacv17.pdf,Deep Image Set Hashing,2017 +219,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,19458454308a9f56b7de76bf7d8ff8eaa52b0173,citation,https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf,Deep Features for Recognizing Disguised Faces in the Wild,0 +220,CASIA Webface,casia_webface,41.10427915,29.02231159,Istanbul Technical University,edu,7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404767,A multi-view face database from Turkish TV series,2018 +221,CASIA Webface,casia_webface,39.9922379,116.30393816,Peking University,edu,26973cf1552250f402c82e9a4445f03fe6757b58,citation,http://doi.acm.org/10.1145/3126686.3130239,Surveillance Video Quality Assessment Based on Face Recognition,2017 +222,CASIA Webface,casia_webface,52.17638955,0.14308882,University of Cambridge,edu,dd471f321ead8b405da6194057b2778ef3db7ea7,citation,https://pdfs.semanticscholar.org/dd47/1f321ead8b405da6194057b2778ef3db7ea7.pdf,Multi-Task Adversarial Network for Disentangled Feature Learning,2018 +223,CASIA Webface,casia_webface,40.786127,29.4456329,Bilişim Technology Instititute,edu,55266ddbe9d5366e8cd1b0b645971cad6d12157a,citation,https://doi.org/10.1109/SIU.2017.7960368,Face recognition classifier based on dimension reduction in deep learning properties,2017 +224,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,7fb5006b6522436ece5bedf509e79bdb7b79c9a7,citation,https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf,Multi-Task Convolutional Neural Network for Face Recognition,2017 +225,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +226,CASIA Webface,casia_webface,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,1ed49161e58559be399ce7092569c19ddd39ca0b,citation,https://doi.org/10.1109/ICPR.2016.7899973,Transferring from face recognition to face attribute prediction through adaptive selection of off-the-shelf CNN representations,2016 +227,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,82e66c4832386cafcec16b92ac88088ffd1a1bc9,citation,http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf,OpenFace: A general-purpose face recognition library with mobile applications,2016 +228,CASIA Webface,casia_webface,52.4004837,16.95158083,Poznan University of Technology,edu,82e66c4832386cafcec16b92ac88088ffd1a1bc9,citation,http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf,OpenFace: A general-purpose face recognition library with mobile applications,2016 +229,CASIA Webface,casia_webface,40.9153196,-73.1270626,Stony Brook University,edu,6fbb179a4ad39790f4558dd32316b9f2818cd106,citation,http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf,Input Aggregated Network for Face Video Representation,2016 +230,CASIA 
Webface,casia_webface,46.109237,7.08453549,IDIAP Research Institute,edu,b4e889af57295dff9498ba476893a359a91b8a3e,citation,https://arxiv.org/pdf/1707.02749.pdf,Improving Speaker Turn Embedding by Crossmodal Transfer Learning from Face Embedding,2017 +231,CASIA Webface,casia_webface,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,5f448ab700528888019542e6fea1d1e0db6c35f2,citation,https://doi.org/10.1109/LSP.2016.2533721,Transferred Deep Convolutional Neural Network Features for Extensive Facial Landmark Localization,2016 +232,CASIA Webface,casia_webface,40.51865195,-74.44099801,State University of New Jersey,edu,438c4b320b9a94a939af21061b4502f4a86960e3,citation,https://arxiv.org/pdf/1702.03041.pdf,Reconstruction-Based Disentanglement for Pose-Invariant Face Recognition,2017 +233,CASIA Webface,casia_webface,32.87935255,-117.23110049,"University of California, San Diego",edu,438c4b320b9a94a939af21061b4502f4a86960e3,citation,https://arxiv.org/pdf/1702.03041.pdf,Reconstruction-Based Disentanglement for Pose-Invariant Face Recognition,2017 +234,CASIA Webface,casia_webface,39.94976005,116.33629046,Beijing Jiaotong University,edu,d7cbedbee06293e78661335c7dd9059c70143a28,citation,https://arxiv.org/pdf/1804.07573.pdf,MobileFaceNets: Efficient CNNs for Accurate Real-time Face Verification on Mobile Devices,2018 +235,CASIA Webface,casia_webface,58.38131405,26.72078081,University of Tartu,edu,81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f,citation,http://pdfs.semanticscholar.org/8169/5fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f.pdf,Comparison of Face Recognition Neural Networks,0 +236,CASIA Webface,casia_webface,30.642769,104.06751175,"Sichuan University, Chengdu",edu,1afef6b389bd727c566cd6fbcd99adefe4c0cf32,citation,https://doi.org/10.1109/ICB.2016.7550087,Towards resolution invariant face recognition in uncontrolled scenarios,2016 +237,CASIA Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,a75dfb5a839f0eb4b613d150f54a418b7812aa90,citation,http://arxiv.org/abs/1708.02314,Multibiometric secure system based on deep learning,2017 +238,CASIA Webface,casia_webface,30.642769,104.06751175,"Sichuan University, Chengdu",edu,23ecc496eaa238ac884e6bae5763f6138a9c90a3,citation,https://doi.org/10.1109/ICB.2016.7550085,Discriminative Feature Adaptation for cross-domain facial expression recognition,2016 +239,CASIA Webface,casia_webface,53.21967825,6.56251482,University of Groningen,edu,8efda5708bbcf658d4f567e3866e3549fe045bbb,citation,http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf,Pre-trained Deep Convolutional Neural Networks for Face Recognition,2018 +240,CASIA Webface,casia_webface,-27.49741805,153.01316956,University of Queensland,edu,3c563542db664321aa77a9567c1601f425500f94,citation,https://arxiv.org/pdf/1712.02514.pdf,TV-GAN: Generative Adversarial Network Based Thermal to Visible Face Recognition,2018 +241,CASIA Webface,casia_webface,30.642769,104.06751175,"Sichuan University, Chengdu",edu,772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,citation,https://doi.org/10.1109/BTAS.2017.8272716,Multi-dim: A multi-dimensional face database towards the application of 3D technology in real-world scenarios,2017 +242,CASIA Webface,casia_webface,34.0224149,-118.28634407,University of Southern California,edu,4e7ed13e541b8ed868480375785005d33530e06d,citation,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477555,Face recognition using deep multi-pose representations,2016 +243,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State 
University,edu,02467703b6e087799e04e321bea3a4c354c5487d,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27,Grouper: Optimizing Crowdsourced Face Annotations,2016 +244,CASIA Webface,casia_webface,51.24303255,-0.59001382,University of Surrey,edu,7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,citation,https://arxiv.org/pdf/1504.02351.pdf,When Face Recognition Meets with Deep Learning: An Evaluation of Convolutional Neural Networks for Face Recognition,2015 +245,CASIA Webface,casia_webface,51.5247272,-0.03931035,Queen Mary University of London,edu,7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,citation,https://arxiv.org/pdf/1504.02351.pdf,When Face Recognition Meets with Deep Learning: An Evaluation of Convolutional Neural Networks for Face Recognition,2015 +246,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,39f525f3a0475e6bbfbe781ae3a74aca5b401125,citation,http://pdfs.semanticscholar.org/39f5/25f3a0475e6bbfbe781ae3a74aca5b401125.pdf,Deep Joint Face Hallucination and Recognition,2016 +247,CASIA Webface,casia_webface,34.2474949,108.97898751,Xi'an Jiaotong University,edu,a4bb791b135bdc721c8fcc5bdef612ca654d7377,citation,https://doi.org/10.1109/BTAS.2017.8272703,Location-sensitive sparse representation of deep normal patterns for expression-robust 3D face recognition,2017 +248,CASIA Webface,casia_webface,29.7207902,-95.34406271,University of Houston,edu,3cb2841302af1fb9656f144abc79d4f3d0b27380,citation,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf,When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition,2017 +249,CASIA Webface,casia_webface,34.0224149,-118.28634407,University of Southern California,edu,d28d32af7ef9889ef9cb877345a90ea85e70f7f1,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84,Local-Global Landmark Confidences for Face Recognition,2017 +250,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,d28d32af7ef9889ef9cb877345a90ea85e70f7f1,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84,Local-Global Landmark Confidences for Face Recognition,2017 +251,CASIA Webface,casia_webface,39.87391435,116.47722285,Beijing University of Technology,edu,f1d6da83dcf71eda45a56a86c5ae13e7f45a8536,citation,https://doi.org/10.1109/ACCESS.2017.2737544,A Secure Face-Verification Scheme Based on Homomorphic Encryption and Deep Neural Networks,2017 +252,CASIA Webface,casia_webface,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,68e6cfb0d7423d3fae579919046639c8e2d04ad7,citation,https://doi.org/10.1109/ICB.2016.7550058,Multi-task ConvNet for blind face inpainting with application to face verification,2016 +253,CASIA Webface,casia_webface,-32.00686365,115.89691775,Curtin University,edu,e9a5a38e7da3f0aa5d21499149536199f2e0e1f7,citation,https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf,A Bayesian Scene-Prior-Based Deep Network Model for Face Verification,2018 +254,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,82eff71af91df2ca18aebb7f1153a7aed16ae7cc,citation,https://pdfs.semanticscholar.org/82ef/f71af91df2ca18aebb7f1153a7aed16ae7cc.pdf,MSU-AVIS dataset : Fusing Face and Voice Modalities for Biometric Recognition in Indoor Surveillance Videos,2018 +255,CASIA Webface,casia_webface,36.3697191,127.362537,Korea Advanced Institute of Science and 
Technology,edu,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +256,CASIA Webface,casia_webface,37.2520226,127.0555019,"Samsung SAIT, Korea",company,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +257,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,f5eb0cf9c57716618fab8e24e841f9536057a28a,citation,https://arxiv.org/pdf/1803.02988.pdf,Rethinking Feature Distribution for Loss Functions in Image Classification,2018 +258,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,2149d49c84a83848d6051867290d9c8bfcef0edb,citation,https://doi.org/10.1109/TIFS.2017.2746062,Label-Sensitive Deep Metric Learning for Facial Age Estimation,2018 +259,CASIA Webface,casia_webface,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4cfd770ccecae1c0b4248bc800d7fd35c817bbbd,citation,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf,A Discriminative Feature Learning Approach for Deep Face Recognition,2016 +260,CASIA Webface,casia_webface,22.42031295,114.20788644,Chinese University of Hong Kong,edu,4cfd770ccecae1c0b4248bc800d7fd35c817bbbd,citation,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf,A Discriminative Feature Learning Approach for Deep Face Recognition,2016 +261,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,2679e4f84c5e773cae31cef158eb358af475e22f,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Liu_Adaptive_Deep_Metric_CVPR_2017_paper.pdf,Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition,2017 +262,CASIA Webface,casia_webface,22.304572,114.17976285,Hong Kong Polytechnic University,edu,2679e4f84c5e773cae31cef158eb358af475e22f,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Liu_Adaptive_Deep_Metric_CVPR_2017_paper.pdf,Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition,2017 +263,CASIA Webface,casia_webface,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,2679e4f84c5e773cae31cef158eb358af475e22f,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Liu_Adaptive_Deep_Metric_CVPR_2017_paper.pdf,Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition,2017 +264,CASIA Webface,casia_webface,51.24303255,-0.59001382,University of Surrey,edu,9103148dd87e6ff9fba28509f3b265e1873166c9,citation,http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf,Face Analysis using 3D Morphable Models,2015 +265,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7,citation,https://arxiv.org/pdf/1701.07174.pdf,Toward End-to-End Face Recognition Through Alignment Learning,2017 +266,CASIA Webface,casia_webface,40.00229045,116.32098908,Tsinghua University,edu,a52a69bf304d49fba6eac6a73c5169834c77042d,citation,https://doi.org/10.1109/LSP.2017.2789251,Margin Loss: Making Faces More Separable,2018 +267,CASIA Webface,casia_webface,24.78676765,120.99724412,National Chiao Tung University,edu,15ef65fd68d61f3d47326e358c446b0f054f093a,citation,https://doi.org/10.1109/MLSP.2017.8168180,Learning guided 
convolutional neural networks for cross-resolution face recognition,2017 +268,CASIA Webface,casia_webface,25.0411727,121.6146518,"Academia Sinica, Taiwan",edu,15ef65fd68d61f3d47326e358c446b0f054f093a,citation,https://doi.org/10.1109/MLSP.2017.8168180,Learning guided convolutional neural networks for cross-resolution face recognition,2017 +269,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,8d3e95c31c93548b8c71dbeee2e9f7180067a888,citation,https://doi.org/10.1109/ICPR.2016.7899841,Template regularized sparse coding for face verification,2016 +270,CASIA Webface,casia_webface,42.8271556,-73.8780481,GE Global Research,company,8d3e95c31c93548b8c71dbeee2e9f7180067a888,citation,https://doi.org/10.1109/ICPR.2016.7899841,Template regularized sparse coding for face verification,2016 +271,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,d6791b98353aa113d79f6fb96335aa6c7ea3b759,citation,https://doi.org/10.1109/TNNLS.2017.2648122,Collaborative Random Faces-Guided Encoders for Pose-Invariant Face Representation Learning,2018 +272,CASIA Webface,casia_webface,41.62772475,-71.00724501,University of Massachusetts Dartmouth,edu,d6791b98353aa113d79f6fb96335aa6c7ea3b759,citation,https://doi.org/10.1109/TNNLS.2017.2648122,Collaborative Random Faces-Guided Encoders for Pose-Invariant Face Representation Learning,2018 +273,CASIA Webface,casia_webface,42.3383668,-71.08793524,Northeastern University,edu,d6791b98353aa113d79f6fb96335aa6c7ea3b759,citation,https://doi.org/10.1109/TNNLS.2017.2648122,Collaborative Random Faces-Guided Encoders for Pose-Invariant Face Representation Learning,2018 +274,CASIA Webface,casia_webface,37.4102193,-122.05965487,Carnegie Mellon University,edu,4d16337cc0431cd43043dfef839ce5f0717c3483,citation,http://pdfs.semanticscholar.org/4d16/337cc0431cd43043dfef839ce5f0717c3483.pdf,A Scalable and Privacy-Aware IoT Service for Live Video Analytics,2017 +275,CASIA Webface,casia_webface,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,e4c3587392d477b7594086c6f28a00a826abf004,citation,https://doi.org/10.1109/ICIP.2017.8296998,Face recognition by facial attribute assisted network,2017 +276,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb,citation,https://arxiv.org/pdf/1704.06693.pdf,SREFI: Synthesis of realistic example face images,2017 +277,CASIA Webface,casia_webface,47.6423318,-122.1369302,Microsoft,company,0aebe97a92f590bdf21cdadfddec8061c682cdb2,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183,Probabilistic Elastic Part Model: A Pose-Invariant Representation for Real-World Face Verification,2018 +278,CASIA Webface,casia_webface,42.718568,-84.47791571,Michigan State University,edu,d29eec5e047560627c16803029d2eb8a4e61da75,citation,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf,Feature Transfer Learning for Deep Face Recognition with Long-Tail Data,2018 +279,CASIA Webface,casia_webface,40.8419836,-73.94368971,Columbia University,edu,35f03f5cbcc21a9c36c84e858eeb15c5d6722309,citation,http://doi.acm.org/10.1145/2964284.2970929,Placing Broadcast News Videos in their Social Media Context Using Hashtags,2016 +280,CASIA Webface,casia_webface,41.10427915,29.02231159,Istanbul Technical University,edu,fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,citation,https://arxiv.org/pdf/1606.02909.pdf,Apparent Age Estimation Using Ensemble of Deep Learning Models,2016 +281,CASIA 
Webface,casia_webface,29.58333105,-98.61944505,University of Texas at San Antonio,edu,7788fa76f1488b1597ee2bebc462f628e659f61e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888,A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds,2018 +282,CASIA Webface,casia_webface,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,39c10888a470b92b917788c57a6fd154c97b421c,citation,https://doi.org/10.1109/VCIP.2017.8305036,Joint multi-feature fusion and attribute relationships for facial attribute prediction,2017 +283,CASIA Webface,casia_webface,51.49887085,-0.17560797,Imperial College London,edu,40bb090a4e303f11168dce33ed992f51afe02ff7,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf,Marginal Loss for Deep Face Recognition,2017 +284,CASIA Webface,casia_webface,31.83907195,117.26420748,University of Science and Technology of China,edu,3107316f243233d45e3c7e5972517d1ed4991f91,citation,http://arxiv.org/abs/1703.10155,CVAE-GAN: Fine-Grained Image Generation through Asymmetric Training,2017 +285,CASIA Webface,casia_webface,46.010737,8.958109,University of Lugano,edu,cae41c3d5508f57421faf672ee1bea0da4be66e0,citation,https://doi.org/10.1109/ICPR.2016.7900298,Palmprint recognition via discriminative index learning,2016 +286,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,a3201e955d6607d383332f3a12a7befa08c5a18c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,VLAD encoded Deep Convolutional features for unconstrained face verification,2016 +287,CASIA Webface,casia_webface,40.47913175,-74.43168868,Rutgers University,edu,a3201e955d6607d383332f3a12a7befa08c5a18c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,VLAD encoded Deep Convolutional features for unconstrained face verification,2016 +288,CASIA Webface,casia_webface,45.7835966,4.7678948,École Centrale de Lyon,edu,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +289,CASIA Webface,casia_webface,48.832493,2.267474,Safran Identity and Security,company,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +290,CASIA Webface,casia_webface,32.1119889,34.80459702,Tel Aviv University,edu,2f16baddac6af536451b3216b02d3480fc361ef4,citation,http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf,Web-scale training for face identification,2015 +291,CASIA Webface,casia_webface,46.0501558,14.46907327,University of Ljubljana,edu,73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198,Report on the BTAS 2016 Video Person Recognition Evaluation,2016 +292,CASIA Webface,casia_webface,41.70456775,-86.23822026,University of Notre Dame,edu,73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198,Report on the BTAS 2016 Video Person Recognition Evaluation,2016 +293,CASIA Webface,casia_webface,-33.8809651,151.20107299,University of Technology Sydney,edu,73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198,Report on the BTAS 2016 Video Person Recognition Evaluation,2016 +294,CASIA 
Webface,casia_webface,39.65404635,-79.96475355,West Virginia University,edu,73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198,Report on the BTAS 2016 Video Person Recognition Evaluation,2016 +295,CASIA Webface,casia_webface,31.28473925,121.49694909,Tongji University,edu,fe0cf8eaa5a5f59225197ef1bb8613e603cd96d4,citation,https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf,Improved Face Verification with Simple Weighted Feature Combination,2017 +296,CASIA Webface,casia_webface,51.49887085,-0.17560797,Imperial College London,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +297,CASIA Webface,casia_webface,51.24303255,-0.59001382,University of Surrey,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +298,CASIA Webface,casia_webface,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,9627f28ea5f4c389350572b15968386d7ce3fe49,citation,https://arxiv.org/pdf/1802.07447.pdf,Load Balanced GANs for Multi-view Face Image Synthesis,2018 +299,CASIA Webface,casia_webface,23.09461185,113.28788994,Sun Yat-Sen University,edu,44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,citation,http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf,Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models,2016 +300,CASIA Webface,casia_webface,50.7791703,6.06728733,RWTH Aachen University,edu,6ce23cf4f440021b7b05aa3c1c2700cc7560b557,citation,http://pdfs.semanticscholar.org/6ce2/3cf4f440021b7b05aa3c1c2700cc7560b557.pdf,Learning Local Convolutional Features for Face Recognition with 2D-Warping,2016 +301,CASIA Webface,casia_webface,17.4454957,78.34854698,International Institute of Information Technology,edu,f5eb411217f729ad7ae84bfd4aeb3dedb850206a,citation,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf,Tackling Low Resolution for Better Scene Understanding,2018 +302,CASIA Webface,casia_webface,40.47913175,-74.43168868,Rutgers University,edu,92e464a5a67582d5209fa75e3b29de05d82c7c86,citation,https://pdfs.semanticscholar.org/92e4/64a5a67582d5209fa75e3b29de05d82c7c86.pdf,Reconstruction for Feature Disentanglement in Pose-invariant Face Recognition,2017 +303,CASIA Webface,casia_webface,34.2474949,108.97898751,Xi'an Jiaotong University,edu,cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7945277,Improving CNN Performance Accuracies With Min–Max Objective,2018 +304,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of Maryland,edu,0334a8862634988cc684dacd4279c5c0d03704da,citation,http://arxiv.org/abs/1609.06591,FaceNet2ExpNet: Regularizing a Deep Face Recognition Net for Expression Recognition,2017 +305,CASIA Webface,casia_webface,40.47913175,-74.43168868,Rutgers University,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +306,CASIA Webface,casia_webface,39.2899685,-76.62196103,University of 
Maryland,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015
+307,CASIA Webface,casia_webface,22.5447154,113.9357164,Tencent,company,a2d1818eb461564a5153c74028e53856cf0b40fd,citation,https://arxiv.org/pdf/1810.07599.pdf,Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition,2018
+308,CASIA Webface,casia_webface,53.21967825,6.56251482,University of Groningen,edu,4c8ef4f98c6c8d340b011cfa0bb65a9377107970,citation,http://pdfs.semanticscholar.org/4c8e/f4f98c6c8d340b011cfa0bb65a9377107970.pdf,Sentiment Recognition in Egocentric Photostreams,2017
+309,CASIA Webface,casia_webface,41.3868913,2.16352385,University of Barcelona,edu,4c8ef4f98c6c8d340b011cfa0bb65a9377107970,citation,http://pdfs.semanticscholar.org/4c8e/f4f98c6c8d340b011cfa0bb65a9377107970.pdf,Sentiment Recognition in Egocentric Photostreams,2017
+310,CASIA Webface,casia_webface,65.0592157,25.46632601,University of Oulu,edu,035c8632c1ffbeb75efe16a4ec50c91e20e6e189,citation,http://doi.org/10.1007/s00138-018-0943-x,Kinship verification from facial images and videos: human versus machine,2018
diff --git a/site/datasets/final/cofw.csv b/site/datasets/final/cofw.csv
new file mode 100644
index 00000000..3b50c56d
--- /dev/null
+++ b/site/datasets/final/cofw.csv
@@ -0,0 +1,233 @@
+index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,COFW,cofw,0.0,0.0,,,2724ba85ec4a66de18da33925e537f3902f21249,main,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298,Robust Face Landmark Estimation under Occlusion,2013
+1,COFW,cofw,23.04436505,113.36668458,Guangzhou University,edu,293d69d042fe9bc4fea256c61915978ddaf7cc92,citation,https://doi.org/10.1007/978-981-10-7302-1_6,Face Recognition by Coarse-to-Fine Landmark Regression with Application to ATM Surveillance,2017
+2,COFW,cofw,23.09461185,113.28788994,Sun Yat-Sen University,edu,293d69d042fe9bc4fea256c61915978ddaf7cc92,citation,https://doi.org/10.1007/978-981-10-7302-1_6,Face Recognition by Coarse-to-Fine Landmark Regression with Application to ATM Surveillance,2017
+3,COFW,cofw,32.87935255,-117.23110049,"University of California, San Diego",edu,d68dbb71b34dfe98dee0680198a23d3b53056394,citation,http://pdfs.semanticscholar.org/d68d/bb71b34dfe98dee0680198a23d3b53056394.pdf,VIVA Face-off Challenge: Dataset Creation and Balancing Privacy,2015
+4,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,c2474202d56bb80663e7bece5924245978425fc1,citation,https://doi.org/10.1109/ICIP.2016.7532771,Localize heavily occluded human faces via deep segmentation,2016
+5,COFW,cofw,31.83907195,117.26420748,University of Science and Technology of China,edu,a7a3ec1128f920066c25cb86fbc33445ce613919,citation,https://doi.org/10.1109/VCIP.2017.8305115,Joint facial landmark detection and action estimation based on deep probabilistic random forest,2017
+6,COFW,cofw,42.9336278,-78.88394479,SUNY Buffalo,edu,a7a3ec1128f920066c25cb86fbc33445ce613919,citation,https://doi.org/10.1109/VCIP.2017.8305115,Joint facial landmark detection and action estimation based on deep probabilistic random forest,2017
+7,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,0141cb33c822e87e93b0c1bad0a09db49b3ad470,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876,Unconstrained 3D face reconstruction,2015
+8,COFW,cofw,22.2081469,114.25964115,University of Hong 
Kong,edu,fb87045600da73b07f0757f345a937b1c8097463,citation,https://pdfs.semanticscholar.org/5c54/2fef80a35a4f930e5c82040b52c58e96ce87.pdf,Reflective Regression of 2D-3D Face Shape Across Large Pose,2016 +9,COFW,cofw,1.2962018,103.77689944,National University of Singapore,edu,1fe59275142844ce3ade9e2aed900378dd025880,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Xiao_Facial_Landmark_Detection_ICCV_2015_paper.pdf,Facial Landmark Detection via Progressive Initialization,2015 +10,COFW,cofw,42.7298459,-73.67950216,Rensselaer Polytechnic Institute,edu,171d8a39b9e3d21231004f7008397d5056ff23af,citation,http://arxiv.org/abs/1709.08130,"Simultaneous Facial Landmark Detection, Pose and Deformation Estimation Under Facial Occlusion",2017 +11,COFW,cofw,52.17638955,0.14308882,University of Cambridge,edu,4ae291b070ad7940b3c9d3cb10e8c05955c9e269,citation,http://www.cl.cam.ac.uk/~pr10/publications/icmi14.pdf,Automatic Detection of Naturalistic Hand-over-Face Gesture Descriptors,2014 +12,COFW,cofw,39.9041999,116.4073963,"360 AI Institute, Beijing, China",company,54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334,Towards Robust and Accurate Multi-View and Partially-Occluded Face Alignment,2018 +13,COFW,cofw,51.2352438,7.1593132,Delphi Deutschland GMBH,company,54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334,Towards Robust and Accurate Multi-View and Partially-Occluded Face Alignment,2018 +14,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334,Towards Robust and Accurate Multi-View and Partially-Occluded Face Alignment,2018 +15,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,bc910ca355277359130da841a589a36446616262,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf,Conditional High-Order Boltzmann Machine: A Supervised Learning Model for Relation Learning,2015 +16,COFW,cofw,29.7207902,-95.34406271,University of Houston,edu,466f80b066215e85da63e6f30e276f1a9d7c843b,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.81,Joint Head Pose Estimation and Face Alignment Framework Using Global and Local CNN Features,2017 +17,COFW,cofw,37.4102193,-122.05965487,Carnegie Mellon University,edu,3146fabd5631a7d1387327918b184103d06c2211,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Jeni_Person-Independent_3D_Gaze_CVPR_2016_paper.pdf,Person-Independent 3D Gaze Estimation Using Face Frontalization,2016 +18,COFW,cofw,40.44415295,-79.96243993,University of Pittsburgh,edu,3146fabd5631a7d1387327918b184103d06c2211,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Jeni_Person-Independent_3D_Gaze_CVPR_2016_paper.pdf,Person-Independent 3D Gaze Estimation Using Face Frontalization,2016 +19,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,citation,http://doi.org/10.1007/s11263-017-1012-z,Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting,2017 +20,COFW,cofw,38.83133325,-77.30798839,George Mason University,edu,a9426cb98c8aedf79ea19839643a7cf1e435aeaa,citation,https://doi.org/10.1109/GlobalSIP.2016.7905998,Cascaded regression for 3D pose estimation for mouse in fisheye lens distorted monocular 
images,2016 +21,COFW,cofw,39.00041165,-77.10327775,National Institutes of Health,edu,a9426cb98c8aedf79ea19839643a7cf1e435aeaa,citation,https://doi.org/10.1109/GlobalSIP.2016.7905998,Cascaded regression for 3D pose estimation for mouse in fisheye lens distorted monocular images,2016 +22,COFW,cofw,41.3861759,2.1248717,"Transmural Biotech, Barcelona, Spain",edu,a9426cb98c8aedf79ea19839643a7cf1e435aeaa,citation,https://doi.org/10.1109/GlobalSIP.2016.7905998,Cascaded regression for 3D pose estimation for mouse in fisheye lens distorted monocular images,2016 +23,COFW,cofw,26.88111275,112.62850666,Hunan University,edu,1fe1a78c941e03abe942498249c041b2703fd3d2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393355,Face alignment based on improved shape searching,2017 +24,COFW,cofw,32.8164178,130.72703969,Kumamoto University,edu,7aafeb9aab48fb2c34bed4b86755ac71e3f00338,citation,http://pdfs.semanticscholar.org/7aaf/eb9aab48fb2c34bed4b86755ac71e3f00338.pdf,Real Time 3D Facial Movement Tracking Using a Monocular Camera,2016 +25,COFW,cofw,31.28473925,121.49694909,Tongji University,edu,7aafeb9aab48fb2c34bed4b86755ac71e3f00338,citation,http://pdfs.semanticscholar.org/7aaf/eb9aab48fb2c34bed4b86755ac71e3f00338.pdf,Real Time 3D Facial Movement Tracking Using a Monocular Camera,2016 +26,COFW,cofw,32.8164178,130.72703969,Kumamoto University,edu,6fdf2f4f7ae589af6016305a17d460617d9ef345,citation,https://doi.org/10.1109/ICIP.2015.7350767,Robust facial landmark localization using multi partial features,2015 +27,COFW,cofw,31.28473925,121.49694909,Tongji University,edu,6fdf2f4f7ae589af6016305a17d460617d9ef345,citation,https://doi.org/10.1109/ICIP.2015.7350767,Robust facial landmark localization using multi partial features,2015 +28,COFW,cofw,31.21051105,29.91314562,Alexandria University,edu,9a4c45e5c6e4f616771a7325629d167a38508691,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Mostafa_A_Facial_Features_2015_CVPR_paper.pdf,A facial features detector integrating holistic facial information and part-based model,2015 +29,COFW,cofw,27.18794105,31.17009498,Assiut University,edu,9a4c45e5c6e4f616771a7325629d167a38508691,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Mostafa_A_Facial_Features_2015_CVPR_paper.pdf,A facial features detector integrating holistic facial information and part-based model,2015 +30,COFW,cofw,38.2167565,-85.75725023,University of Louisville,edu,9a4c45e5c6e4f616771a7325629d167a38508691,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Mostafa_A_Facial_Features_2015_CVPR_paper.pdf,A facial features detector integrating holistic facial information and part-based model,2015 +31,COFW,cofw,37.5901411,127.0362318,Korea University,edu,5957936195c10521dadc9b90ca9b159eb1fc4871,citation,https://doi.org/10.1109/TCE.2016.7838098,LBP-ferns-based feature extraction for robust facial recognition,2016 +32,COFW,cofw,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +33,COFW,cofw,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +34,COFW,cofw,40.0141905,-83.0309143,University of Electronic 
Science and Technology of China,edu,21a2f67b21905ff6e0afa762937427e92dc5aa0b,citation,http://pdfs.semanticscholar.org/21a2/f67b21905ff6e0afa762937427e92dc5aa0b.pdf,Extra Facial Landmark Localization via Global Shape Reconstruction,2017 +35,COFW,cofw,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,88e2574af83db7281c2064e5194c7d5dfa649846,citation,http://pdfs.semanticscholar.org/88e2/574af83db7281c2064e5194c7d5dfa649846.pdf,A Robust Shape Reconstruction Method for Facial Feature Point Detection,2017 +36,COFW,cofw,29.7207902,-95.34406271,University of Houston,edu,607aebe7568407421e8ffc7b23a5fda52650ad93,citation,https://doi.org/10.1109/ISBA.2016.7477237,Face alignment via an ensemble of random ferns,2016 +37,COFW,cofw,-27.49741805,153.01316956,University of Queensland,edu,710c3aaffef29730ffd909a63798e9185f488327,citation,https://doi.org/10.1109/ICPR.2016.7900095,The GIST of aligning faces,2016 +38,COFW,cofw,32.7283683,-97.11201835,University of Texas at Arlington,edu,411dc8874fd7b3a9a4c1fd86bb5b583788027776,citation,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf,Direct Shape Regression Networks for End-to-End Face Alignment,2018 +39,COFW,cofw,34.1235825,108.83546,Xidian University,edu,411dc8874fd7b3a9a4c1fd86bb5b583788027776,citation,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf,Direct Shape Regression Networks for End-to-End Face Alignment,2018 +40,COFW,cofw,30.44235995,-84.29747867,Florida State University,edu,1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,citation,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf,Face Detection with a 3D Model,2014 +41,COFW,cofw,39.00041165,-77.10327775,National Institutes of Health,edu,1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,citation,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf,Face Detection with a 3D Model,2014 +42,COFW,cofw,32.87935255,-117.23110049,"University of California, San Diego",edu,43776d1bfa531e66d5e9826ff5529345b792def7,citation,http://cvrr.ucsd.edu/scmartin/presentation/DriveAnalysisByLookingIn-ITSC2015-NDS.pdf,Automatic Critical Event Extraction and Semantic Interpretation by Looking-Inside,2015 +43,COFW,cofw,38.99203005,-76.9461029,University of Maryland College Park,edu,f7824758800a7b1a386db5bd35f84c81454d017a,citation,https://arxiv.org/pdf/1702.05085.pdf,KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors,2017 +44,COFW,cofw,38.99203005,-76.9461029,University of Maryland College Park,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +45,COFW,cofw,40.47913175,-74.43168868,Rutgers University,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +46,COFW,cofw,39.2899685,-76.62196103,University of Maryland,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +47,COFW,cofw,1.2962018,103.77689944,National University of Singapore,edu,3be8f1f7501978287af8d7ebfac5963216698249,citation,https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf,Deep Cascaded Regression for Face Alignment,2015 
+48,COFW,cofw,23.09461185,113.28788994,Sun Yat-Sen University,edu,3be8f1f7501978287af8d7ebfac5963216698249,citation,https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf,Deep Cascaded Regression for Face Alignment,2015 +49,COFW,cofw,51.7534538,-1.25400997,University of Oxford,edu,a3d0ebb50d49116289fb176d28ea98a92badada6,citation,https://pdfs.semanticscholar.org/a3d0/ebb50d49116289fb176d28ea98a92badada6.pdf,Unsupervised Learning of Object Landmarks through Conditional Image Generation,2018 +50,COFW,cofw,55.94951105,-3.19534913,University of Edinburgh,edu,a3d0ebb50d49116289fb176d28ea98a92badada6,citation,https://pdfs.semanticscholar.org/a3d0/ebb50d49116289fb176d28ea98a92badada6.pdf,Unsupervised Learning of Object Landmarks through Conditional Image Generation,2018 +51,COFW,cofw,30.642769,104.06751175,"Sichuan University, Chengdu",edu,a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b,citation,https://pdfs.semanticscholar.org/a0aa/32bb7f406693217fba6dcd4aeb6c4d5a479b.pdf,Cascaded Regressor based 3D Face Reconstruction from a Single Arbitrary View Image,2015 +52,COFW,cofw,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,deb89950939ae9847f0a1a4bb198e6dbfed62778,citation,https://doi.org/10.1109/LSP.2016.2543019,Accurate Facial Landmark Extraction,2016 +53,COFW,cofw,3.12267405,101.65356103,University of Malaya,edu,deb89950939ae9847f0a1a4bb198e6dbfed62778,citation,https://doi.org/10.1109/LSP.2016.2543019,Accurate Facial Landmark Extraction,2016 +54,COFW,cofw,37.4102193,-122.05965487,Carnegie Mellon University,edu,78598e7005f7c96d64cc47ff47e6f13ae52245b8,citation,https://arxiv.org/pdf/1708.00370.pdf,Hand2Face: Automatic synthesis and recognition of hand over face occlusions,2017 +55,COFW,cofw,28.59899755,-81.19712501,University of Central Florida,edu,78598e7005f7c96d64cc47ff47e6f13ae52245b8,citation,https://arxiv.org/pdf/1708.00370.pdf,Hand2Face: Automatic synthesis and recognition of hand over face occlusions,2017 +56,COFW,cofw,52.17638955,0.14308882,University of Cambridge,edu,9901f473aeea177a55e58bac8fd4f1b086e575a4,citation,https://arxiv.org/pdf/1509.04954.pdf,Human and sheep facial landmarks localisation by triplet interpolated features,2016 +57,COFW,cofw,40.00229045,116.32098908,Tsinghua University,edu,e4fa062bff299a0bcef9f6b2e593c85be116c9f1,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407641,Cascaded Elastically Progressive Model for Accurate Face Alignment,2017 +58,COFW,cofw,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,5c820e47981d21c9dddde8d2f8020146e600368f,citation,http://pdfs.semanticscholar.org/5c82/0e47981d21c9dddde8d2f8020146e600368f.pdf,Extended Supervised Descent Method for Robust Face Alignment,2014 +59,COFW,cofw,51.49887085,-0.17560797,Imperial College London,edu,29c340c83b3bbef9c43b0c50b4d571d5ed037cbd,citation,https://pdfs.semanticscholar.org/29c3/40c83b3bbef9c43b0c50b4d571d5ed037cbd.pdf,Stacked Dense U-Nets with Dual Transformers for Robust Face Alignment,2018 +60,COFW,cofw,30.19331415,120.11930822,Zhejiang University,edu,5213549200bccec57232fc3ff788ddf1043af7b3,citation,http://doi.acm.org/10.1145/2601097.2601204,Displaced dynamic expression regression for real-time facial tracking and animation,2014 +61,COFW,cofw,51.49887085,-0.17560797,Imperial College London,edu,034b3f3bac663fb814336a69a9fd3514ca0082b9,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298991,Unifying holistic and Parts-Based Deformable Model fitting,2015 
+62,COFW,cofw,50.74223495,-1.89433739,Bournemouth University,edu,91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,citation,http://doi.org/10.1007/s41095-016-0068-y,Robust facial landmark detection and tracking across poses and expressions for in-the-wild monocular video,2016 +63,COFW,cofw,45.7413921,126.62552755,Harbin Institute of Technology,edu,91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,citation,http://doi.org/10.1007/s41095-016-0068-y,Robust facial landmark detection and tracking across poses and expressions for in-the-wild monocular video,2016 +64,COFW,cofw,39.9808333,116.34101249,Beihang University,edu,86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,citation,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,Attentional Alignment Networks,2018 +65,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,citation,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,Attentional Alignment Networks,2018 +66,COFW,cofw,32.7283683,-97.11201835,University of Texas at Arlington,edu,86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,citation,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,Attentional Alignment Networks,2018 +67,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,22e2066acfb795ac4db3f97d2ac176d6ca41836c,citation,http://pdfs.semanticscholar.org/26f5/3a1abb47b1f0ea1f213dc7811257775dc6e6.pdf,Coarse-to-Fine Auto-Encoder Networks (CFAN) for Real-Time Face Alignment,2014 +68,COFW,cofw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,22e2066acfb795ac4db3f97d2ac176d6ca41836c,citation,http://pdfs.semanticscholar.org/26f5/3a1abb47b1f0ea1f213dc7811257775dc6e6.pdf,Coarse-to-Fine Auto-Encoder Networks (CFAN) for Real-Time Face Alignment,2014 +69,COFW,cofw,43.13800205,-75.22943591,SUNY Polytechnic Institute,edu,69b18d62330711bfd7f01a45f97aaec71e9ea6a5,citation,http://pdfs.semanticscholar.org/69b1/8d62330711bfd7f01a45f97aaec71e9ea6a5.pdf,M-Track: A New Software for Automated Detection of Grooming Trajectories in Mice,2016 +70,COFW,cofw,-30.0338248,-51.218828,Federal University of Rio Grande do Sul,edu,fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,citation,https://doi.org/10.1109/TIM.2015.2415012,Customized Orthogonal Locality Preserving Projections With Soft-Margin Maximization for Face Recognition,2015 +71,COFW,cofw,-28.234493,-52.38044,University of Passo Fundo,edu,fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,citation,https://doi.org/10.1109/TIM.2015.2415012,Customized Orthogonal Locality Preserving Projections With Soft-Margin Maximization for Face Recognition,2015 +72,COFW,cofw,34.0224149,-118.28634407,University of Southern California,edu,632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c,citation,https://arxiv.org/pdf/1604.02647.pdf,Real-Time Facial Segmentation and Performance Capture from RGB Input,2016 +73,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf,Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment,2017 +74,COFW,cofw,40.00229045,116.32098908,Tsinghua University,edu,329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf,Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment,2017 +75,COFW,cofw,43.07982815,-89.43066425,University of 
Wisconsin Madison,edu,77fbbf0c5729f97fcdbfdc507deee3d388cd4889,citation,https://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf,Pose-Robust 3D Facial Landmark Estimation from a Single 2D Image,2016 +76,COFW,cofw,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_094_ext.pdf,Face alignment using cascade Gaussian process regression trees,2015 +77,COFW,cofw,-33.88890695,151.18943366,University of Sydney,edu,58d43e32660446669ff54f29658961fe8bb6cc72,citation,https://doi.org/10.1109/ISBI.2017.7950504,Automatic detection of obstructive sleep apnea using facial images,2017 +78,COFW,cofw,52.3793131,-1.5604252,University of Warwick,edu,0bc53b338c52fc635687b7a6c1e7c2b7191f42e5,citation,http://pdfs.semanticscholar.org/a32a/8d6d4c3b4d69544763be48ffa7cb0d7f2f23.pdf,Loglet SIFT for Part Description in Deformable Part Models: Application to Face Alignment,2016 +79,COFW,cofw,40.51865195,-74.44099801,State University of New Jersey,edu,bbc5f4052674278c96abe7ff9dc2d75071b6e3f3,citation,https://pdfs.semanticscholar.org/287b/7baff99d6995fd5852002488eb44659be6c1.pdf,Nonlinear Hierarchical Part-Based Regression for Unconstrained Face Alignment,2016 +80,COFW,cofw,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,7f2a4cd506fe84dee26c0fb41848cb219305173f,citation,http://pdfs.semanticscholar.org/7f2a/4cd506fe84dee26c0fb41848cb219305173f.pdf,Face Detection and Pose Estimation Based on Evaluating Facial Feature Selection,2015 +81,COFW,cofw,32.77824165,34.99565673,Open University of Israel,edu,0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,citation,https://arxiv.org/pdf/1708.07517.pdf,FacePoseNet: Making a Case for Landmark-Free Face Alignment,2017 +82,COFW,cofw,23.09461185,113.28788994,Sun Yat-Sen University,edu,4c078c2919c7bdc26ca2238fa1a79e0331898b56,citation,http://pdfs.semanticscholar.org/4c07/8c2919c7bdc26ca2238fa1a79e0331898b56.pdf,Unconstrained Facial Landmark Localization with Backbone-Branches Fully-Convolutional Networks,2015 +83,COFW,cofw,34.0224149,-118.28634407,University of Southern California,edu,43e99b76ca8e31765d4571d609679a689afdc99e,citation,http://arxiv.org/abs/1709.00536,Learning Dense Facial Correspondences in Unconstrained Images,2017 +84,COFW,cofw,32.0565957,118.77408833,Nanjing University,edu,46b2ecef197b465abc43e0e017543b1af61921ac,citation,https://doi.org/10.1109/ICPR.2016.7899652,Face alignment with Cascaded Bidirectional LSTM Neural Networks,2016 +85,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,c00f402b9cfc3f8dd2c74d6b3552acbd1f358301,citation,http://pdfs.semanticscholar.org/c00f/402b9cfc3f8dd2c74d6b3552acbd1f358301.pdf,Learning deep representation from coarse to fine for face alignment,2016 +86,COFW,cofw,17.4454957,78.34854698,International Institute of Information Technology,edu,185263189a30986e31566394680d6d16b0089772,citation,https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf,Efficient Annotation of Objects for Video Analysis,2018 +87,COFW,cofw,-33.8809651,151.20107299,University of Technology Sydney,edu,ebc2a3e8a510c625353637e8e8f07bd34410228f,citation,https://doi.org/10.1109/TIP.2015.2502485,Dual Sparse Constrained Cascade Regression for Robust Face Alignment,2016 +88,COFW,cofw,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,1922ad4978ab92ce0d23acc4c7441a8812f157e5,citation,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf,Face alignment by coarse-to-fine shape searching,2015 +89,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,1922ad4978ab92ce0d23acc4c7441a8812f157e5,citation,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf,Face alignment by coarse-to-fine shape searching,2015 +90,COFW,cofw,34.0224149,-118.28634407,University of Southern California,edu,53e081f5af505374c3b8491e9c4470fe77fe7934,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Hsieh_Unconstrained_Realtime_Facial_2015_CVPR_paper.pdf,Unconstrained realtime facial performance capture,2015 +91,COFW,cofw,39.9922379,116.30393816,Peking University,edu,11ba01ce7d606bab5c2d7e998c6d94325521b8a0,citation,https://doi.org/10.1109/ICIP.2015.7350911,Regression based landmark estimation and multi-feature fusion for visual speech recognition,2015 +92,COFW,cofw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,b11bb6bd63ee6f246d278dd4edccfbe470263803,citation,http://pdfs.semanticscholar.org/b11b/b6bd63ee6f246d278dd4edccfbe470263803.pdf,Joint Voxel and Coordinate Regression for Accurate 3D Facial Landmark Localization,2018 +93,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,86204fc037936754813b91898377e8831396551a,citation,https://arxiv.org/pdf/1709.01442.pdf,Dense Face Alignment,2017 +94,COFW,cofw,43.07982815,-89.43066425,University of Wisconsin Madison,edu,0eac652139f7ab44ff1051584b59f2dc1757f53b,citation,http://pdfs.semanticscholar.org/0eac/652139f7ab44ff1051584b59f2dc1757f53b.pdf,Efficient Branching Cascaded Regression for Face Alignment under Significant Head Rotation,2016 +95,COFW,cofw,39.9586652,116.30971281,Beijing Institute of Technology,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +96,COFW,cofw,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +97,COFW,cofw,1.2962018,103.77689944,National University of Singapore,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +98,COFW,cofw,-26.1888813,28.02479073,University of the Witwatersrand,edu,aa4af9b3811db6a30e1c7cc1ebf079078c1ee152,citation,http://doi.acm.org/10.1145/3129416.3129451,Deformable part models with CNN features for facial landmark detection under occlusion,2017 +99,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,303a7099c01530fa0beb197eb1305b574168b653,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Occlusion-Free_Face_Alignment_CVPR_2016_paper.pdf,Occlusion-Free Face Alignment: Deep Regression Networks Coupled with De-Corrupt AutoEncoders,2016 +100,COFW,cofw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,303a7099c01530fa0beb197eb1305b574168b653,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Occlusion-Free_Face_Alignment_CVPR_2016_paper.pdf,Occlusion-Free Face Alignment: Deep Regression Networks Coupled with De-Corrupt AutoEncoders,2016 +101,COFW,cofw,39.9082804,116.2458527,University of Chinese Academy of 
Sciences,edu,b5da4943c348a6b4c934c2ea7330afaf1d655e79,citation,http://pdfs.semanticscholar.org/b5da/4943c348a6b4c934c2ea7330afaf1d655e79.pdf,Facial Landmarks Detection by Self-Iterative Regression based Landmarks-Attention Network,2018 +102,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,a73405038fdc0d8bf986539ef755a80ebd341e97,citation,https://doi.org/10.1109/TIP.2017.2698918,Conditional High-Order Boltzmann Machines for Supervised Relation Learning,2017 +103,COFW,cofw,34.13710185,-118.12527487,California Institute of Technology,edu,11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,citation,http://pdfs.semanticscholar.org/11aa/527c01e61ec3a7a67eef8d7ffe9d9ce63f1d.pdf,"Automated measurement of mouse social behaviors using depth sensing, video tracking, and machine learning.",2015 +104,COFW,cofw,51.24303255,-0.59001382,University of Surrey,edu,56e25056153a15eae2a6b10c109f812d2b753cee,citation,https://arxiv.org/pdf/1711.06753.pdf,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,2017 +105,COFW,cofw,31.4854255,120.2739581,Jiangnan University,edu,56e25056153a15eae2a6b10c109f812d2b753cee,citation,https://arxiv.org/pdf/1711.06753.pdf,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,2017 +106,COFW,cofw,37.4102193,-122.05965487,Carnegie Mellon University,edu,f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53,citation,https://arxiv.org/pdf/1707.05653.pdf,Faster than Real-Time Facial Alignment: A 3D Spatial Transformer Network Approach in Unconstrained Poses,2017 +107,COFW,cofw,40.00229045,116.32098908,Tsinghua University,edu,445e3ba7eabcc55b5d24f951b029196b47830684,citation,https://doi.org/10.1109/TMM.2016.2591508,Learning Cascaded Deep Auto-Encoder Networks for Face Alignment,2016 +108,COFW,cofw,1.3484104,103.68297965,Nanyang Technological University,edu,445e3ba7eabcc55b5d24f951b029196b47830684,citation,https://doi.org/10.1109/TMM.2016.2591508,Learning Cascaded Deep Auto-Encoder Networks for Face Alignment,2016 +109,COFW,cofw,38.99203005,-76.9461029,University of Maryland College Park,edu,1389ba6c3ff34cdf452ede130c738f37dca7e8cb,citation,http://pdfs.semanticscholar.org/1389/ba6c3ff34cdf452ede130c738f37dca7e8cb.pdf,A Convolution Tree with Deconvolution Branches: Exploiting Geometric Relationships for Single Shot Keypoint Detection,2017 +110,COFW,cofw,32.87935255,-117.23110049,"University of California, San Diego",edu,b806a31c093b31e98cc5fca7e3ec53f2cc169db9,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7995928,Gaze fixations and dynamics for behavior modeling and prediction of on-road driving maneuvers,2017 +111,COFW,cofw,37.4102193,-122.05965487,Carnegie Mellon University,edu,4140498e96a5ff3ba816d13daf148fffb9a2be3f,citation,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Li_Constrained.pdf,Constrained Ensemble Initialization for Facial Landmark Tracking in Video,2017 +112,COFW,cofw,38.99203005,-76.9461029,University of Maryland College Park,edu,f8e64dd25c3174dff87385db56abc48101b69009,citation,https://arxiv.org/pdf/1802.06713.pdf,Disentangling 3D Pose in A Dendritic CNN for Unconstrained 2D Face Alignment,2018 +113,COFW,cofw,43.7047927,-72.2925909,Dartmouth College,edu,df71a00071d5a949f9c31371c2e5ee8b478e7dc8,citation,http://studentlife.cs.dartmouth.edu/facelogging.pdf,Using opportunistic face logging from smartphone to infer mental health: challenges and future directions,2015 +114,COFW,cofw,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,2c17d36bab56083293456fe14ceff5497cc97d75,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf,Unconstrained Face Alignment via Cascaded Compositional Learning,2016 +115,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2c17d36bab56083293456fe14ceff5497cc97d75,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf,Unconstrained Face Alignment via Cascaded Compositional Learning,2016 +116,COFW,cofw,51.24303255,-0.59001382,University of Surrey,edu,438e7999c937b94f0f6384dbeaa3febff6d283b6,citation,https://arxiv.org/pdf/1705.02402v2.pdf,"Face Detection, Bounding Box Aggregation and Pose Estimation for Robust Facial Landmark Localisation in the Wild",2017 +117,COFW,cofw,31.4854255,120.2739581,Jiangnan University,edu,438e7999c937b94f0f6384dbeaa3febff6d283b6,citation,https://arxiv.org/pdf/1705.02402v2.pdf,"Face Detection, Bounding Box Aggregation and Pose Estimation for Robust Facial Landmark Localisation in the Wild",2017 +118,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,5ee0103048e1ce46e34a04c45ff2c2c31529b466,citation,https://doi.org/10.1109/ICIP.2015.7350886,Learning occlusion patterns using semantic phrases for object detection,2015 +119,COFW,cofw,32.77824165,34.99565673,Open University of Israel,edu,c75e6ce54caf17b2780b4b53f8d29086b391e839,citation,https://arxiv.org/pdf/1802.00542.pdf,"ExpNet: Landmark-Free, Deep, 3D Facial Expressions",2018 +120,COFW,cofw,51.24303255,-0.59001382,University of Surrey,edu,96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,citation,https://arxiv.org/pdf/1611.05396.pdf,Dynamic Attention-Controlled Cascaded Shape Regression Exploiting Training Data Augmentation and Fuzzy-Set Sample Weighting,2017 +121,COFW,cofw,31.4854255,120.2739581,Jiangnan University,edu,96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,citation,https://arxiv.org/pdf/1611.05396.pdf,Dynamic Attention-Controlled Cascaded Shape Regression Exploiting Training Data Augmentation and Fuzzy-Set Sample Weighting,2017 +122,COFW,cofw,30.60903415,114.3514284,Wuhan University of Technology,edu,258b3b1df82186dd76064ef86b28555e91389b73,citation,https://doi.org/10.1109/ACCESS.2017.2739822,Initial Shape Pool Construction for Facial Landmark Localization Under Occlusion,2017 +123,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,85674b1b6007634f362cbe9b921912b697c0a32c,citation,http://pdfs.semanticscholar.org/8567/4b1b6007634f362cbe9b921912b697c0a32c.pdf,Optimizing Facial Landmark Detection by Facial Attribute Learning,2014 +124,COFW,cofw,43.07982815,-89.43066425,University of Wisconsin Madison,edu,716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0,citation,http://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf,Pose-Robust 3D Facial Landmark Estimation from a Single 2D Image,2016 +125,COFW,cofw,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,2a84f7934365f05b6707ea0ac225210f78e547af,citation,https://doi.org/10.1109/ICPR.2016.7899690,A joint facial point detection method of deep convolutional network and shape regression,2016 +126,COFW,cofw,41.40657415,2.1945341,Universitat Oberta de Catalunya,edu,cc4fc9a309f300e711e09712701b1509045a8e04,citation,https://pdfs.semanticscholar.org/cea6/9010a2f75f7a057d56770e776dec206ed705.pdf,Continuous Supervised Descent Method for Facial Landmark Localisation,2016 +127,COFW,cofw,13.65450525,100.49423171,Robotics 
Institute,edu,cc4fc9a309f300e711e09712701b1509045a8e04,citation,https://pdfs.semanticscholar.org/cea6/9010a2f75f7a057d56770e776dec206ed705.pdf,Continuous Supervised Descent Method for Facial Landmark Localisation,2016 +128,COFW,cofw,40.44415295,-79.96243993,University of Pittsburgh,edu,cc4fc9a309f300e711e09712701b1509045a8e04,citation,https://pdfs.semanticscholar.org/cea6/9010a2f75f7a057d56770e776dec206ed705.pdf,Continuous Supervised Descent Method for Facial Landmark Localisation,2016 +129,COFW,cofw,1.2962018,103.77689944,National University of Singapore,edu,30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,citation,http://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf,Deep Cascaded Regression for Face Alignment,2015 +130,COFW,cofw,23.09461185,113.28788994,Sun Yat-Sen University,edu,30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,citation,http://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf,Deep Cascaded Regression for Face Alignment,2015 +131,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,8a3c5507237957d013a0fe0f082cab7f757af6ee,citation,http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf,Facial Landmark Detection by Deep Multi-task Learning,2014 +132,COFW,cofw,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,citation,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf,Towards Arbitrary-View Face Alignment by Recommendation Trees,2015 +133,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,citation,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf,Towards Arbitrary-View Face Alignment by Recommendation Trees,2015 +134,COFW,cofw,30.642769,104.06751175,"Sichuan University, Chengdu",edu,b29b42f7ab8d25d244bfc1413a8d608cbdc51855,citation,http://pdfs.semanticscholar.org/b29b/42f7ab8d25d244bfc1413a8d608cbdc51855.pdf,Effective face landmark localization via single deep network,2017 +135,COFW,cofw,23.0490047,113.3971571,South China University of China,edu,7d7be6172fc2884e1da22d1e96d5899a29831ad2,citation,http://pdfs.semanticscholar.org/7d7b/e6172fc2884e1da22d1e96d5899a29831ad2.pdf,L2GSCI: Local to Global Seam Cutting and Integrating for Accurate Face Contour Extraction,2017 +136,COFW,cofw,22.46935655,114.19474194,Education University of Hong Kong,edu,7d7be6172fc2884e1da22d1e96d5899a29831ad2,citation,http://pdfs.semanticscholar.org/7d7b/e6172fc2884e1da22d1e96d5899a29831ad2.pdf,L2GSCI: Local to Global Seam Cutting and Integrating for Accurate Face Contour Extraction,2017 +137,COFW,cofw,39.9922379,116.30393816,Peking University,edu,8c048be9dd2b601808b893b5d3d51f00907bdee0,citation,https://doi.org/10.1631/FITEE.1600041,Spontaneous versus posed smile recognition via region-specific texture descriptor and geometric facial dynamics,2017 +138,COFW,cofw,22.42031295,114.20788644,Chinese University of Hong Kong,edu,433a6d6d2a3ed8a6502982dccc992f91d665b9b3,citation,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf,Transferring Landmark Annotations for Cross-Dataset Face Alignment,2014 +139,COFW,cofw,40.00229045,116.32098908,Tsinghua University,edu,433a6d6d2a3ed8a6502982dccc992f91d665b9b3,citation,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf,Transferring Landmark Annotations for Cross-Dataset Face Alignment,2014 +140,COFW,cofw,47.05821,15.46019568,Graz University of 
Technology,edu,96a9ca7a8366ae0efe6b58a515d15b44776faf6e,citation,https://arxiv.org/pdf/1609.00129.pdf,Grid Loss: Detecting Occluded Faces,2016 +141,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,citation,https://arxiv.org/pdf/1511.07212.pdf,Face Alignment in Full Pose Range: A 3D Total Solution,2017 +142,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,citation,https://arxiv.org/pdf/1511.07212.pdf,Face Alignment in Full Pose Range: A 3D Total Solution,2017 +143,COFW,cofw,31.30104395,121.50045497,Fudan University,edu,862d17895fe822f7111e737cbcdd042ba04377e8,citation,http://pdfs.semanticscholar.org/862d/17895fe822f7111e737cbcdd042ba04377e8.pdf,Semi-Latent GAN: Learning to generate and modify facial images from attributes,2017 +144,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,085ceda1c65caf11762b3452f87660703f914782,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Jourabloo_Large-Pose_Face_Alignment_CVPR_2016_paper.pdf,Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting,2016 +145,COFW,cofw,39.977217,116.337632,Microsoft Research Asia,company,9aade3d26996ce7ef6d657130464504b8d812534,citation,https://doi.org/10.1109/TNNLS.2016.2618340,Face Alignment With Deep Regression,2018 +146,COFW,cofw,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,9aade3d26996ce7ef6d657130464504b8d812534,citation,https://doi.org/10.1109/TNNLS.2016.2618340,Face Alignment With Deep Regression,2018 +147,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,055cd8173536031e189628c879a2acad6cf2a5d0,citation,https://doi.org/10.1109/BTAS.2017.8272740,Fast multi-view face alignment via multi-task auto-encoders,2017 +148,COFW,cofw,36.20304395,117.05842113,Tianjin University,edu,4223917177405eaa6bdedca061eb28f7b440ed8e,citation,http://pdfs.semanticscholar.org/4223/917177405eaa6bdedca061eb28f7b440ed8e.pdf,B-spline Shape from Motion & Shading: An Automatic Free-form Surface Modeling for Face Reconstruction,2016 +149,COFW,cofw,22.304572,114.17976285,Hong Kong Polytechnic University,edu,4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7,citation,https://arxiv.org/pdf/1611.09956v1.pdf,Efficient likelihood Bayesian constrained local model,2017 +150,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,2a4153655ad1169d482e22c468d67f3bc2c49f12,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf,Face Alignment Across Large Poses: A 3D Solution,2016 +151,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,2a4153655ad1169d482e22c468d67f3bc2c49f12,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf,Face Alignment Across Large Poses: A 3D Solution,2016 +152,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,090ff8f992dc71a1125636c1adffc0634155b450,citation,http://pdfs.semanticscholar.org/090f/f8f992dc71a1125636c1adffc0634155b450.pdf,Topic-Aware Deep Auto-Encoders (TDA) for Face Alignment,2014 +153,COFW,cofw,51.49887085,-0.17560797,Imperial College London,edu,090ff8f992dc71a1125636c1adffc0634155b450,citation,http://pdfs.semanticscholar.org/090f/f8f992dc71a1125636c1adffc0634155b450.pdf,Topic-Aware Deep Auto-Encoders (TDA) for Face Alignment,2014 +154,COFW,cofw,39.9082804,116.2458527,University of Chinese Academy of 
Sciences,edu,090ff8f992dc71a1125636c1adffc0634155b450,citation,http://pdfs.semanticscholar.org/090f/f8f992dc71a1125636c1adffc0634155b450.pdf,Topic-Aware Deep Auto-Encoders (TDA) for Face Alignment,2014 +155,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,26949c1ba7f55f0c389000aa234238bf01a32d3b,citation,https://doi.org/10.1109/ICIP.2017.8296814,Coupled cascade regression for simultaneous facial landmark detection and head pose estimation,2017 +156,COFW,cofw,42.7298459,-73.67950216,Rensselaer Polytechnic Institute,edu,26949c1ba7f55f0c389000aa234238bf01a32d3b,citation,https://doi.org/10.1109/ICIP.2017.8296814,Coupled cascade regression for simultaneous facial landmark detection and head pose estimation,2017 +157,COFW,cofw,-27.49741805,153.01316956,University of Queensland,edu,de79437f74e8e3b266afc664decf4e6e4bdf34d7,citation,https://doi.org/10.1109/IVCNZ.2016.7804415,To face or not to face: Towards reducing false positive of face detection,2016 +158,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,a26fd9df58bb76d6c7a3254820143b3da5bd584b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446759,Monitor Pupils' Attention by Image Super-Resolution and Anomaly Detection,2017 +159,COFW,cofw,51.5247272,-0.03931035,Queen Mary University of London,edu,0f81b0fa8df5bf3fcfa10f20120540342a0c92e5,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7299100,"Mirror, mirror on the wall, tell me, is the error small?",2015 +160,COFW,cofw,31.2284923,121.40211389,East China Normal University,edu,83295bce2340cb87901499cff492ae6ff3365475,citation,https://arxiv.org/pdf/1808.01558.pdf,Deep Multi-Center Learning for Face Alignment,2018 +161,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,83295bce2340cb87901499cff492ae6ff3365475,citation,https://arxiv.org/pdf/1808.01558.pdf,Deep Multi-Center Learning for Face Alignment,2018 +162,COFW,cofw,52.9387428,-1.20029569,University of Nottingham,edu,721e5ba3383b05a78ef1dfe85bf38efa7e2d611d,citation,http://pdfs.semanticscholar.org/74f1/9d0986c9d39aabb359abaa2a87a248a48deb.pdf,"BULAT, TZIMIROPOULOS: CONVOLUTIONAL AGGREGATION OF LOCAL EVIDENCE 1 Convolutional aggregation of local evidence for large pose face alignment",2016 +163,COFW,cofw,32.0565957,118.77408833,Nanjing University,edu,ad5a35a251e07628dd035c68e44a64c53652be6b,citation,https://doi.org/10.1016/j.patcog.2016.12.024,Robust facial landmark tracking via cascade regression,2017 +164,COFW,cofw,39.9922379,116.30393816,Peking University,edu,5df17c81c266cf2ebb0778e48e825905e161a8d9,citation,https://doi.org/10.1109/TMM.2016.2520091,A Novel Lip Descriptor for Audio-Visual Keyword Spotting Based on Adaptive Decision Fusion,2016 +165,COFW,cofw,45.2182986,5.80703193,INRIA Grenoble,edu,5df17c81c266cf2ebb0778e48e825905e161a8d9,citation,https://doi.org/10.1109/TMM.2016.2520091,A Novel Lip Descriptor for Audio-Visual Keyword Spotting Based on Adaptive Decision Fusion,2016 +166,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,3d9e44d8f8bc2663192c7ce668ccbbb084e466e4,citation,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019505,Learning a multi-center convolutional network for unconstrained face alignment,2017 +167,COFW,cofw,53.8338371,10.7035939,Institute of Systems and Robotics,edu,6604fd47f92ce66dd0c669dd66b347b80e17ebc9,citation,https://pdfs.semanticscholar.org/6604/fd47f92ce66dd0c669dd66b347b80e17ebc9.pdf,Simultaneous Cascaded Regression,2018 +168,COFW,cofw,25.01353105,121.54173736,National Taiwan University of Science 
and Technology,edu,e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,citation,https://doi.org/10.1109/BTAS.2017.8272722,Cross-pose landmark localization using multi-dropout framework,2017 +169,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,9048732c8591a92a1f4f589b520a733f07578f80,citation,https://doi.org/10.1109/CISP-BMEI.2017.8301921,Improved CNN-based facial landmarks tracking via ridge regression at 150 Fps on mobile devices,2017 +170,COFW,cofw,31.83907195,117.26420748,University of Science and Technology of China,edu,084bd02d171e36458f108f07265386f22b34a1ae,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ren_Face_Alignment_at_2014_CVPR_paper.pdf,Face Alignment at 3000 FPS via Regressing Local Binary Features,2014 +171,COFW,cofw,33.6431901,-117.84016494,"University of California, Irvine",edu,65126e0b1161fc8212643b8ff39c1d71d262fbc1,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ghiasi_Occlusion_Coherence_Localizing_2014_CVPR_paper.pdf,Occlusion Coherence: Localizing Occluded Faces with a Hierarchical Deformable Part Model,2014 +172,COFW,cofw,17.4454957,78.34854698,International Institute of Information Technology,edu,156cd2a0e2c378e4c3649a1d046cd080d3338bca,citation,http://pdfs.semanticscholar.org/156c/d2a0e2c378e4c3649a1d046cd080d3338bca.pdf,Exemplar based approaches on Face Fiducial Detection and Frontalization,2017 +173,COFW,cofw,-27.5953995,-48.6154218,University of Campinas,edu,159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,citation,https://doi.org/10.1109/TIP.2017.2694226,Weak Classifier for Density Estimation in Eye Localization and Tracking,2017 +174,COFW,cofw,-22.9541412,-43.1753638,Universidade Federal do Rio de Janeiro,edu,159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,citation,https://doi.org/10.1109/TIP.2017.2694226,Weak Classifier for Density Estimation in Eye Localization and Tracking,2017 +175,COFW,cofw,38.99203005,-76.9461029,University of Maryland College Park,edu,b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,citation,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf,"HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition",2016 +176,COFW,cofw,39.2899685,-76.62196103,University of Maryland,edu,b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,citation,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf,"HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition",2016 +177,COFW,cofw,32.87935255,-117.23110049,"University of California, San Diego",edu,d4a5eaf2e9f2fd3e264940039e2cbbf08880a090,citation,https://arxiv.org/pdf/1802.02137.pdf,An Occluded Stacked Hourglass Approach to Facial Landmark Localization and Occlusion Estimation,2017 +178,COFW,cofw,30.274084,120.15507,Alibaba,company,89497854eada7e32f06aa8f3c0ceedc0e91ecfef,citation,https://doi.org/10.1109/TIP.2017.2784571,Deep Context-Sensitive Facial Landmark Detection With Tree-Structured Modeling,2018 +179,COFW,cofw,30.19331415,120.11930822,Zhejiang University,edu,89497854eada7e32f06aa8f3c0ceedc0e91ecfef,citation,https://doi.org/10.1109/TIP.2017.2784571,Deep Context-Sensitive Facial Landmark Detection With Tree-Structured Modeling,2018 +180,COFW,cofw,32.0565957,118.77408833,Nanjing University,edu,9cb7b3b14fd01cc2ed76784ab76304132dab6ff3,citation,https://doi.org/10.1109/ICIP.2015.7351174,Facial landmark detection via pose-induced auto-encoder networks,2015 
+181,COFW,cofw,12.9803537,77.6975101,"Samsung R&D Institute, Bangalore, India",company,cf736f596bf881ca97ec4b29776baaa493b9d50e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629,Low Dimensional Deep Features for facial landmark alignment,2017 +182,COFW,cofw,46.0658836,11.1159894,University of Trento,edu,f201baf618574108bcee50e9a8b65f5174d832ee,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057,Viewpoint-Consistent 3D Face Alignment,2018 +183,COFW,cofw,13.65450525,100.49423171,Robotics Institute,edu,f201baf618574108bcee50e9a8b65f5174d832ee,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057,Viewpoint-Consistent 3D Face Alignment,2018 +184,COFW,cofw,50.74223495,-1.89433739,Bournemouth University,edu,370b6b83c7512419188f5373a962dd3175a56a9b,citation,https://pdfs.semanticscholar.org/370b/6b83c7512419188f5373a962dd3175a56a9b.pdf,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,2017 +185,COFW,cofw,30.19331415,120.11930822,Zhejiang University,edu,370b6b83c7512419188f5373a962dd3175a56a9b,citation,https://pdfs.semanticscholar.org/370b/6b83c7512419188f5373a962dd3175a56a9b.pdf,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,2017 +186,COFW,cofw,32.0565957,118.77408833,Nanjing University,edu,63c74794aedb40dd6b1650352a2da7a968180302,citation,https://doi.org/10.1016/j.neucom.2016.09.015,Recurrent neural network for facial landmark detection,2017 +187,COFW,cofw,38.88140235,121.52281098,Dalian University of Technology,edu,940e5c45511b63f609568dce2ad61437c5e39683,citation,https://doi.org/10.1109/TIP.2015.2390976,Fiducial Facial Point Extraction Using a Novel Projective Invariant,2015 +188,COFW,cofw,51.24303255,-0.59001382,University of Surrey,edu,3c6cac7ecf546556d7c6050f7b693a99cc8a57b3,citation,https://pdfs.semanticscholar.org/3c6c/ac7ecf546556d7c6050f7b693a99cc8a57b3.pdf,Robust facial landmark detection in the wild,2016 +189,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,a820941eaf03077d68536732a4d5f28d94b5864a,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhang_Leveraging_Datasets_With_ICCV_2015_paper.pdf,Leveraging Datasets with Varying Annotations for Face Alignment via Deep Regression Network,2015 +190,COFW,cofw,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,a820941eaf03077d68536732a4d5f28d94b5864a,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhang_Leveraging_Datasets_With_ICCV_2015_paper.pdf,Leveraging Datasets with Varying Annotations for Face Alignment via Deep Regression Network,2015 +191,COFW,cofw,34.0687788,-118.4450094,"University of California, Los Angeles",edu,195d331c958f2da3431f37a344559f9bce09c0f7,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_066_ext.pdf,Parsing occluded people by flexible compositions,2015 +192,COFW,cofw,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,5f448ab700528888019542e6fea1d1e0db6c35f2,citation,https://doi.org/10.1109/LSP.2016.2533721,Transferred Deep Convolutional Neural Network Features for Extensive Facial Landmark Localization,2016 +193,COFW,cofw,31.846918,117.29053367,Hefei University of Technology,edu,2f73203fd71b755a9601d00fc202bbbd0a595110,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394868,Micro-expression Analysis by Fusing Deep Convolutional Neural Network and Optical Flow,2018 +194,COFW,cofw,33.620813,133.719755,Kochi University of 
Technology,edu,2f73203fd71b755a9601d00fc202bbbd0a595110,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394868,Micro-expression Analysis by Fusing Deep Convolutional Neural Network and Optical Flow,2018 +195,COFW,cofw,32.0565957,118.77408833,Nanjing University,edu,5b0bf1063b694e4b1575bb428edb4f3451d9bf04,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.131,Facial Shape Tracking via Spatio-Temporal Cascade Shape Regression,2015 +196,COFW,cofw,31.83907195,117.26420748,University of Science and Technology of China,edu,dd715a98dab34437ad05758b20cc640c2cdc5715,citation,https://doi.org/10.1007/s41095-017-0082-8,Joint head pose and facial landmark regression from depth images,2017 +197,COFW,cofw,52.17638955,0.14308882,University of Cambridge,edu,2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40,citation,http://www.cl.cam.ac.uk/~pr10/publications/fg17.pdf,Estimating Sheep Pain Level Using Facial Action Unit Detection,2017 +198,COFW,cofw,55.7039571,13.1902011,Lund University,edu,995d55fdf5b6fe7fb630c93a424700d4bc566104,citation,http://openaccess.thecvf.com/content_iccv_2015/papers/Nilsson_The_One_Triangle_ICCV_2015_paper.pdf,The One Triangle Three Parallelograms Sampling Strategy and Its Application in Shape Regression,2015 +199,COFW,cofw,13.65450525,100.49423171,Robotics Institute,edu,b6f15bf8723b2d5390122442ab04630d2d3878d8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163142,Dense 3D face alignment from 2D videos in real-time,2015 +200,COFW,cofw,32.77824165,34.99565673,Open University of Israel,edu,62e913431bcef5983955e9ca160b91bb19d9de42,citation,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf,Facial Landmark Detection with Tweaked Convolutional Neural Networks,2015 +201,COFW,cofw,37.4102193,-122.05965487,Carnegie Mellon University,edu,7cfbf90368553333b47731729e0e358479c25340,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7346480,"Towards a Unified Framework for Pose, Expression, and Occlusion Tolerant Automatic Facial Alignment",2016 +202,COFW,cofw,40.986904,29.0530981,"Marmara University, Istanbul, Turkey",edu,a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404357,A comparison of facial landmark detection methods,2018 +203,COFW,cofw,41.6771297,26.5557145,"Trakya University, Edirne, Turkey",edu,a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404357,A comparison of facial landmark detection methods,2018 +204,COFW,cofw,65.0592157,25.46632601,University of Oulu,edu,193debca0be1c38dabc42dc772513e6653fd91d8,citation,http://ibug.doc.ic.ac.uk/media/uploads/documents/trigeorgis2016mnemonic.pdf,Mnemonic Descent Method: A Recurrent Process Applied for End-to-End Face Alignment,2016 +205,COFW,cofw,51.5217668,-0.13019072,University of London,edu,193debca0be1c38dabc42dc772513e6653fd91d8,citation,http://ibug.doc.ic.ac.uk/media/uploads/documents/trigeorgis2016mnemonic.pdf,Mnemonic Descent Method: A Recurrent Process Applied for End-to-End Face Alignment,2016 +206,COFW,cofw,51.49887085,-0.17560797,Imperial College London,edu,193debca0be1c38dabc42dc772513e6653fd91d8,citation,http://ibug.doc.ic.ac.uk/media/uploads/documents/trigeorgis2016mnemonic.pdf,Mnemonic Descent Method: A Recurrent Process Applied for End-to-End Face Alignment,2016 +207,COFW,cofw,50.7791703,6.06728733,RWTH Aachen University,edu,141ee531d03fb6626043e33dd8f269a6f1f63a4b,citation,https://arxiv.org/pdf/1808.09316.pdf,How Robust is 3D 
Human Pose Estimation to Occlusion?,2018 +208,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,37ce1d3a6415d6fc1760964e2a04174c24208173,citation,http://www.cse.msu.edu/~liuxm/publication/Jourabloo_Liu_ICCV2015.pdf,Pose-Invariant 3D Face Alignment,2015 +209,COFW,cofw,38.88140235,121.52281098,Dalian University of Technology,edu,5f4219118556d2c627137827a617cf4e26242a6e,citation,https://doi.org/10.1109/TMM.2017.2751143,Explicit Shape Regression With Characteristic Number for Facial Landmark Localization,2018 +210,COFW,cofw,42.2942142,-83.71003894,University of Michigan,edu,860588fafcc80c823e66429fadd7e816721da42a,citation,https://arxiv.org/pdf/1804.04412.pdf,Unsupervised Discovery of Object Landmarks as Structural Representations,2018 +211,COFW,cofw,26.88111275,112.62850666,Hunan University,edu,4b936847f39094d6cb0bde68cea654d948c4735d,citation,http://doi.org/10.1007/s11042-016-3470-7,Face alignment under occlusion based on local and global feature regression,2016 +212,COFW,cofw,34.13710185,-118.12527487,California Institute of Technology,edu,56ae6d94fc6097ec4ca861f0daa87941d1c10b70,citation,http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf,Distance Estimation of an Unknown Person from a Portrait,2014 +213,COFW,cofw,40.0044795,116.370238,Chinese Academy of Sciences,edu,51b42da0706a1260430f27badcf9ee6694768b9b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471882,Shape initialization without ground truth for face alignment,2016 +214,COFW,cofw,35.9542493,-83.9307395,University of Tennessee,edu,c2e03efd8c5217188ab685e73cc2e52c54835d1a,citation,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477585,Deep tree-structured face: A unified representation for multi-task facial biometrics,2016 +215,COFW,cofw,40.47913175,-74.43168868,Rutgers University,edu,3b470b76045745c0ef5321e0f1e0e6a4b1821339,citation,http://pdfs.semanticscholar.org/8e72/fa02f2d90ba31f31e0a7aa96a6d3e10a66fc.pdf,Consensus of Regression for Occlusion-Robust Facial Feature Localization,2014 +216,COFW,cofw,30.642769,104.06751175,"Sichuan University, Chengdu",edu,3080026f2f0846d520bd5bacb0cb2acea0ffe16b,citation,https://doi.org/10.1109/BTAS.2017.8272690,2.5D cascaded regression for robust facial landmark detection,2017 +217,COFW,cofw,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3,citation,http://pdfs.semanticscholar.org/3fdf/d6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3.pdf,Intensity-Depth Face Alignment Using Cascade Shape Regression,2015 +218,COFW,cofw,42.718568,-84.47791571,Michigan State University,edu,ec8ec2dfd73cf3667f33595fef84c95c42125945,citation,https://arxiv.org/pdf/1707.06286.pdf,Pose-Invariant Face Alignment with a Single CNN,2017 +219,COFW,cofw,39.977217,116.337632,Microsoft Research Asia,company,898ff1bafee2a6fb3c848ad07f6f292416b5f07d,citation,https://doi.org/10.1109/TIP.2016.2518867,Face Alignment via Regressing Local Binary Features,2016 +220,COFW,cofw,31.83907195,117.26420748,University of Science and Technology of China,edu,898ff1bafee2a6fb3c848ad07f6f292416b5f07d,citation,https://doi.org/10.1109/TIP.2016.2518867,Face Alignment via Regressing Local Binary Features,2016 +221,COFW,cofw,47.6423318,-122.1369302,Microsoft,company,898ff1bafee2a6fb3c848ad07f6f292416b5f07d,citation,https://doi.org/10.1109/TIP.2016.2518867,Face Alignment via Regressing Local Binary Features,2016 +222,COFW,cofw,39.977217,116.337632,Microsoft Research 
Asia,company,63d865c66faaba68018defee0daf201db8ca79ed,citation,http://pdfs.semanticscholar.org/63d8/65c66faaba68018defee0daf201db8ca79ed.pdf,Deep Regression for Face Alignment,2014 +223,COFW,cofw,52.17638955,0.14308882,University of Cambridge,edu,be57d2aaab615ec8bc1dd2dba8bee41a4d038b85,citation,http://doi.acm.org/10.1145/2946796,Automatic Analysis of Naturalistic Hand-Over-Face Gestures,2016 +224,COFW,cofw,-33.8840504,151.1992254,University of Technology,edu,336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762938,Adaptive Cascade Regression Model For Robust Face Alignment,2017 +225,COFW,cofw,42.36782045,-71.12666653,Harvard University,edu,023be757b1769ecb0db810c95c010310d7daf00b,citation,http://pdfs.semanticscholar.org/023b/e757b1769ecb0db810c95c010310d7daf00b.pdf,Face Alignment Assisted by Head Pose Estimation,2015 +226,COFW,cofw,51.5247272,-0.03931035,Queen Mary University of London,edu,023be757b1769ecb0db810c95c010310d7daf00b,citation,http://pdfs.semanticscholar.org/023b/e757b1769ecb0db810c95c010310d7daf00b.pdf,Face Alignment Assisted by Head Pose Estimation,2015 +227,COFW,cofw,52.17638955,0.14308882,University of Cambridge,edu,023be757b1769ecb0db810c95c010310d7daf00b,citation,http://pdfs.semanticscholar.org/023b/e757b1769ecb0db810c95c010310d7daf00b.pdf,Face Alignment Assisted by Head Pose Estimation,2015 +228,COFW,cofw,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +229,COFW,cofw,34.13710185,-118.12527487,California Institute of Technology,edu,72282287f25c5419dc6fd9e89ec9d86d660dc0b5,citation,https://arxiv.org/pdf/1609.07495v1.pdf,A Rotation Invariant Latent Factor Model for Moveme Discovery from Static Poses,2016 +230,COFW,cofw,51.5247272,-0.03931035,Queen Mary University of London,edu,f11c76efdc9651db329c8c862652820d61933308,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163100,Inference of personality traits and affect schedule by analysis of spontaneous reactions to affective videos,2015 +231,COFW,cofw,46.0658836,11.1159894,University of Trento,edu,f11c76efdc9651db329c8c862652820d61933308,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163100,Inference of personality traits and affect schedule by analysis of spontaneous reactions to affective videos,2015
diff --git a/site/datasets/final/feret.csv b/site/datasets/final/feret.csv
new file mode 100644
index 00000000..24f9991f
--- /dev/null
+++ b/site/datasets/final/feret.csv
@@ -0,0 +1,639 @@
+index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,FERET,feret,0.0,0.0,,,0c4a139bb87c6743c7905b29a3cfec27a5130652,main,http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf,The FERET Verification Testing Protocol for Face Recognition Algorithms,1998 +1,FERET,feret,-27.47715625,153.02841004,Queensland University of Technology,edu,919d0e681c4ef687bf0b89fe7c0615221e9a1d30,citation,http://pdfs.semanticscholar.org/919d/0e681c4ef687bf0b89fe7c0615221e9a1d30.pdf,Fractal Techniques for Face Recognition,2009 +2,FERET,feret,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6,citation,http://pdfs.semanticscholar.org/51ed/4c92cab9336a2ac41fa8e0293c2f5f9bf3b6.pdf,"A Survey of Face Detection, Extraction and Recognition",2003 +3,FERET,feret,22.053565,113.39913285,Jilin 
University,edu,8aff9c8a0e17be91f55328e5be5e94aea5227a35,citation,https://doi.org/10.1109/TNNLS.2012.2191620,Sparse Tensor Discriminant Color Space for Face Verification,2012 +4,FERET,feret,42.3898055,-71.1475986,Raytheon BBN Technologies,company,8aff9c8a0e17be91f55328e5be5e94aea5227a35,citation,https://doi.org/10.1109/TNNLS.2012.2191620,Sparse Tensor Discriminant Color Space for Face Verification,2012 +5,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,8aff9c8a0e17be91f55328e5be5e94aea5227a35,citation,https://doi.org/10.1109/TNNLS.2012.2191620,Sparse Tensor Discriminant Color Space for Face Verification,2012 +6,FERET,feret,30.284151,-97.73195598,University of Texas at Austin,edu,d3b5a52062e5f5415df527705cb24af9b0846617,citation,http://pdfs.semanticscholar.org/d3b5/a52062e5f5415df527705cb24af9b0846617.pdf,Advances and Challenges in 3D and 2D+3D Human Face Recognition,2007 +7,FERET,feret,40.44415295,-79.96243993,University of Pittsburgh,edu,03167776e17bde31b50f294403f97ee068515578,citation,http://pdfs.semanticscholar.org/0316/7776e17bde31b50f294403f97ee068515578.pdf,Chapter 11. Facial Expression Analysis,2004 +8,FERET,feret,22.9991916,120.21625134,National Cheng Kung University,edu,658eb1fd14808d10e0f4fee99c5506a1bb0e351a,citation,https://pdfs.semanticscholar.org/658e/b1fd14808d10e0f4fee99c5506a1bb0e351a.pdf,Multi-Discriminant Classification Algorithm for Face Verification,2008 +9,FERET,feret,33.30715065,-111.67653157,Arizona State University,edu,49570b41bd9574bd9c600e24b269d945c645b7bd,citation,http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf,A Framework for Performance Evaluation of Face Recognition Algorithms,2002 +10,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,d65b82b862cf1dbba3dee6541358f69849004f30,citation,http://pdfs.semanticscholar.org/d65b/82b862cf1dbba3dee6541358f69849004f30.pdf,2.5D Elastic graph matching,2011 +11,FERET,feret,40.7286484,-73.9956863,Courant Institute of Mathematical Sciences,edu,6d5e12ee5d75d5f8c04a196dd94173f96dc8603f,citation,http://www.cs.toronto.edu/~hinton/csc2535_06/readings/chopra-05.pdf,"Learning a similarity metric discriminatively, with application to face verification",2005 +12,FERET,feret,40.72925325,-73.99625394,New York University,edu,6d5e12ee5d75d5f8c04a196dd94173f96dc8603f,citation,http://www.cs.toronto.edu/~hinton/csc2535_06/readings/chopra-05.pdf,"Learning a similarity metric discriminatively, with application to face verification",2005 +13,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,0e1403f2182609fb64ed72913f7294fea7d02bd6,citation,http://pdfs.semanticscholar.org/9457/cdb4b1f4764f70fe86b50e26abc34930f882.pdf,Learning Support Vectors for Face Verification and Recognition,2000 +14,FERET,feret,50.0764296,14.41802312,Czech Technical University,edu,0e1403f2182609fb64ed72913f7294fea7d02bd6,citation,http://pdfs.semanticscholar.org/9457/cdb4b1f4764f70fe86b50e26abc34930f882.pdf,Learning Support Vectors for Face Verification and Recognition,2000 +15,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,fe9a6a93af9c32f6b0454a7cf6897409124514bd,citation,http://pdfs.semanticscholar.org/fe9a/6a93af9c32f6b0454a7cf6897409124514bd.pdf,Designing a smart card face verification system,2006 +16,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,92a3d5ab3eb540a11eddf1b836c1db28640b2746,citation,http://pdfs.semanticscholar.org/92a3/d5ab3eb540a11eddf1b836c1db28640b2746.pdf,Face Recognition using 3D Facial Shape and Color Map Information: Comparison and 
Combination,2004 +17,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,23fc83c8cfff14a16df7ca497661264fc54ed746,citation,http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf,Comprehensive Database for Facial Expression Analysis,2000 +18,FERET,feret,40.44415295,-79.96243993,University of Pittsburgh,edu,23fc83c8cfff14a16df7ca497661264fc54ed746,citation,http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf,Comprehensive Database for Facial Expression Analysis,2000 +19,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +20,FERET,feret,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +21,FERET,feret,40.4319722,-86.92389368,Purdue University,edu,aec46facf3131a5be4fc23db4ebfb5514e904ae3,citation,http://pdfs.semanticscholar.org/aec4/6facf3131a5be4fc23db4ebfb5514e904ae3.pdf,Audio to the rescue,2004 +22,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,544c06584c95bfdcafbd62e04fb796e575981476,citation,http://pdfs.semanticscholar.org/544c/06584c95bfdcafbd62e04fb796e575981476.pdf,Human Identification from Body Shape,2003 +23,FERET,feret,40.44415295,-79.96243993,University of Pittsburgh,edu,84a74ef8680b66e6dccbc69ae80321a52780a68e,citation,http://doi.org/10.1007/978-0-85729-932-1_19,Facial Expression Recognition,2011 +24,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,84a74ef8680b66e6dccbc69ae80321a52780a68e,citation,http://doi.org/10.1007/978-0-85729-932-1_19,Facial Expression Recognition,2011 +25,FERET,feret,35.14479945,33.90492318,Eastern Mediterranean University,edu,b3cc2554449fb10002250bbc178e1009fc2fdb70,citation,http://pdfs.semanticscholar.org/b3cc/2554449fb10002250bbc178e1009fc2fdb70.pdf,Face Recognition Based on Local Zernike Moments,2015 +26,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,fbfb0de017d57c5f282050dadb77797d97785ba5,citation,http://pdfs.semanticscholar.org/fbfb/0de017d57c5f282050dadb77797d97785ba5.pdf,Enabling EBGM Face Authentication on mobile devices,2006 +27,FERET,feret,30.44235995,-84.29747867,Florida State University,edu,0a602b85c80cef7d38209226188aaab94d5349e8,citation,http://pdfs.semanticscholar.org/0a60/2b85c80cef7d38209226188aaab94d5349e8.pdf,THE FLORIDA STATE UNIVERSITY COLLEGE OF ARTS AND SCIENCES AUTOMATED FACE TRACKING AND RECOGNITION By MATTHEW,0 +28,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,a2bcfba155c990f64ffb44c0a1bb53f994b68a15,citation,https://doi.org/10.1109/CVPRW.2011.5981840,The Photoface database,2011 +29,FERET,feret,37.5600406,126.9369248,Yonsei University,edu,425833b5fe892b00dcbeb6e3975008e9a73a5a72,citation,http://pdfs.semanticscholar.org/4258/33b5fe892b00dcbeb6e3975008e9a73a5a72.pdf,A Review of Performance Evaluation for Biometrics Systems,2005 +30,FERET,feret,57.01590275,9.97532827,Aalborg University,edu,7ef44b7c2b5533d00001ae81f9293bdb592f1146,citation,https://pdfs.semanticscholar.org/7ef4/4b7c2b5533d00001ae81f9293bdb592f1146.pdf,Détection des émotions à partir de vidéos dans un environnement non contrôlé Detection of 
emotions from video in non-controlled environment,2003 +31,FERET,feret,-27.5533975,153.05336234,Griffith University,edu,6e968f74fd6b4b3b172c787f298b3d4746ec5cc9,citation,http://www.ict.griffith.edu.au/~junzhou/papers/C_DICTA_2013_C.pdf,A 3D Polygonal Line Chains Matching Method for Face Recognition,2013 +32,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,3a1c3307f57ef09577ac0dc8cd8b090a4fe8091f,citation,http://pdfs.semanticscholar.org/3a1c/3307f57ef09577ac0dc8cd8b090a4fe8091f.pdf,Thermal-to-visible face recognition using partial least squares.,2015 +33,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,81a8b2e55bcea9d9b26e67fcbb5a30ca8a8defc3,citation,http://multispectral-imagery-lab.sandbox.wvu.edu/files/d/337b61b4-b6af-4c96-8314-c282ebebf299/databasesizeeffectsonperformancesmartcardfaceverification.pdf,Database size effects on performance on a smart card face verification system,2006 +34,FERET,feret,41.10427915,29.02231159,Istanbul Technical University,edu,b8b0f0ca35cb02334aaa3192559fb35f0c90f8fa,citation,http://pdfs.semanticscholar.org/b8b0/f0ca35cb02334aaa3192559fb35f0c90f8fa.pdf,Face Recognition in Low-resolution Images by Using Local Zernike Moments,2014 +35,FERET,feret,1.29500195,103.84909214,Singapore Management University,edu,76d1c6c6b67e67ced1f19a89a5034dafc9599f25,citation,http://doi.acm.org/10.1145/2590296.2590315,Understanding OSN-based facial disclosure against face authentication systems,2014 +36,FERET,feret,50.89273635,-1.39464295,University of Southampton,edu,8a12edaf81fd38f81057cf9577c822eb09ff6fc1,citation,http://pdfs.semanticscholar.org/8a12/edaf81fd38f81057cf9577c822eb09ff6fc1.pdf,Measuring and mitigating targeted biometric impersonation,2014 +37,FERET,feret,65.0592157,25.46632601,University of Oulu,edu,8a12edaf81fd38f81057cf9577c822eb09ff6fc1,citation,http://pdfs.semanticscholar.org/8a12/edaf81fd38f81057cf9577c822eb09ff6fc1.pdf,Measuring and mitigating targeted biometric impersonation,2014 +38,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,4b86e711658003a600666d3ccfa4a9905463df1c,citation,https://pdfs.semanticscholar.org/4b86/e711658003a600666d3ccfa4a9905463df1c.pdf,Fusion of Appearance Image and Passive Stereo Depth Map for Face Recognition Based on the Bilateral 2DLDA,2007 +39,FERET,feret,40.7286484,-73.9956863,Courant Institute of Mathematical Sciences,edu,4b8d80f91d271f61b26db5ad627e24e59955c56a,citation,http://pdfs.semanticscholar.org/4b8d/80f91d271f61b26db5ad627e24e59955c56a.pdf,Learning Long-Range Vision for an Offroad Robot,2008 +40,FERET,feret,40.72925325,-73.99625394,New York University,edu,4b8d80f91d271f61b26db5ad627e24e59955c56a,citation,http://pdfs.semanticscholar.org/4b8d/80f91d271f61b26db5ad627e24e59955c56a.pdf,Learning Long-Range Vision for an Offroad Robot,2008 +41,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,7af15295224c3ad69d56f17ff635763dd008a8a4,citation,http://pdfs.semanticscholar.org/7af1/5295224c3ad69d56f17ff635763dd008a8a4.pdf,Learning Support Vectors for Face Authentication: Sensitivity to Mis-Registrations,2007 +42,FERET,feret,50.0764296,14.41802312,Czech Technical University,edu,7af15295224c3ad69d56f17ff635763dd008a8a4,citation,http://pdfs.semanticscholar.org/7af1/5295224c3ad69d56f17ff635763dd008a8a4.pdf,Learning Support Vectors for Face Authentication: Sensitivity to Mis-Registrations,2007 +43,FERET,feret,40.00229045,116.32098908,Tsinghua 
University,edu,5ea51401eea9a50a16bd17471bfd559d2d989760,citation,http://pdfs.semanticscholar.org/5ea5/1401eea9a50a16bd17471bfd559d2d989760.pdf,Robust Face Alignment Based on Hierarchical Classifier Network,2006 +44,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,71644fab2275cfd6a8f770a26aba4e6228e85dec,citation,http://www.jdl.ac.cn/doc/2011/20131910365517756_2012_eccv_mnkan_mvda.pdf,Multi-View Discriminant Analysis,2012 +45,FERET,feret,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Lahore",edu,280bc9751593897091015aaf2cab39805768b463,citation,http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf,Gender Perception From Faces Using Boosted LBPH (Local Binary Patten Histograms),2013 +46,FERET,feret,34.0687788,-118.4450094,"University of California, Los Angeles",edu,23b80dc704e25cf52b5a14935002fc083ce9c317,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383035,Learning Generative Models via Discriminative Approaches,2007 +47,FERET,feret,45.42580475,-75.68740118,University of Ottawa,edu,857ad04fca2740b016f0066b152bd1fa1171483f,citation,http://pdfs.semanticscholar.org/857a/d04fca2740b016f0066b152bd1fa1171483f.pdf,Sample Images can be Independently Restored from Face Recognition Templates,2003 +48,FERET,feret,8.76554685,77.65100445,Manonmaniam Sundaranar University,edu,87b81c8821a2cb9cdf26c75c1531717cab4b942f,citation,http://pdfs.semanticscholar.org/87b8/1c8821a2cb9cdf26c75c1531717cab4b942f.pdf,Face Detection with Facial Features and Gender Classification Based On Support Vector Machine,2010 +49,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,099ce5cb6f42bff5ad117852d62c5a07e6407b8a,citation,https://pdfs.semanticscholar.org/099c/e5cb6f42bff5ad117852d62c5a07e6407b8a.pdf,Spectral Methods for Multi-Scale Feature Extraction and Data Clustering,0 +50,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,21358489b5ce0e94ff37792a8a5eea198e7272f3,citation,http://pdfs.semanticscholar.org/c0cc/2073cad539d979fc6f860177b531b45fafc1.pdf,Face Inpainting with Local Linear Representations,2004 +51,FERET,feret,61.44964205,23.85877462,Tampere University of Technology,edu,dc4e4b9c507e8be2d832faf64e5a2e8887115265,citation,https://pdfs.semanticscholar.org/dc4e/4b9c507e8be2d832faf64e5a2e8887115265.pdf,Face Retrieval Based on Robust Local Features and Statistical-Structural Learning Approach,2008 +52,FERET,feret,37.3219575,127.1250723,Dankook University,edu,891d435fd1a070bb66225abfd62b2e2c5350e87c,citation,https://pdfs.semanticscholar.org/891d/435fd1a070bb66225abfd62b2e2c5350e87c.pdf,Selective Feature Generation Method for Classification of Low-dimensional Data,2018 +53,FERET,feret,32.8536333,-117.2035286,Kyung Hee University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +54,FERET,feret,24.7246403,46.62335012,King Saud University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +55,FERET,feret,23.7289899,90.3982682,Institute of Information Technology,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 
+56,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,cbb55f5885f9a0d0bfaa2c0bf5293ef45a04c5cd,citation,https://pdfs.semanticscholar.org/cbb5/5f5885f9a0d0bfaa2c0bf5293ef45a04c5cd.pdf,Performance Characterisation of Face Recognition Algorithms and Their Sensitivity to Severe Illumination Changes,2006 +57,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,cbb55f5885f9a0d0bfaa2c0bf5293ef45a04c5cd,citation,https://pdfs.semanticscholar.org/cbb5/5f5885f9a0d0bfaa2c0bf5293ef45a04c5cd.pdf,Performance Characterisation of Face Recognition Algorithms and Their Sensitivity to Severe Illumination Changes,2006 +58,FERET,feret,53.21967825,6.56251482,University of Groningen,edu,d8896861126b7fd5d2ceb6fed8505a6dff83414f,citation,http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf,In-plane Rotational Alignment of Faces by Eye and Eye-pair Detection,2015 +59,FERET,feret,29.5084174,106.57858552,Chongqing University,edu,e1d1540a718bb7a933e21339f1a2d90660af7353,citation,http://doi.org/10.1007/s11063-018-9852-2,Discriminative Probabilistic Latent Semantic Analysis with Application to Single Sample Face Recognition,2018 +60,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,55498d89f9eb0c9df9760f5e0e47a15ae7e92f25,citation,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/264.pdf,Learning-based face hallucination in DCT domain,2008 +61,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,f12813073a7f894f82fe2b166893424edba7dc79,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587375,Unified Principal Component Analysis with generalized Covariance Matrix for face recognition,2008 +62,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,f12813073a7f894f82fe2b166893424edba7dc79,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587375,Unified Principal Component Analysis with generalized Covariance Matrix for face recognition,2008 +63,FERET,feret,39.87549675,32.78553506,Middle East Technical University,edu,946c2036c940e77260ade031ba413ec9f2435985,citation,http://pdfs.semanticscholar.org/946c/2036c940e77260ade031ba413ec9f2435985.pdf,PCA for Gender Estimation: Which Eigenvectors Contribute?,2002 +64,FERET,feret,36.1017956,-79.501733,Elon University,edu,a129c30b176820bf7f4756b4b4efc92d2a83f190,citation,https://pdfs.semanticscholar.org/a129/c30b176820bf7f4756b4b4efc92d2a83f190.pdf,Older adults' associative memory is modified by manner of presentation at encoding and retrieval.,2018 +65,FERET,feret,13.01119095,74.79498825,"National Institute of Technology, Karnataka",edu,e1fac9e9427499d3758213daf1c781b9a42a3420,citation,https://pdfs.semanticscholar.org/7c90/60a809bd28ef61421588f48e33f6eae6ddfd.pdf,Face Image Retrieval Based on Probe Sketch Using SIFT Feature Descriptors,2012 +66,FERET,feret,35.9542493,-83.9307395,University of Tennessee,edu,7735f63e5790006cb3d989c8c19910e40200abfc,citation,http://pdfs.semanticscholar.org/7735/f63e5790006cb3d989c8c19910e40200abfc.pdf,Multispectral Imaging For Face Recognition Over Varying Illumination,2008 +67,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,f909d04c809013b930bafca12c0f9a8192df9d92,citation,http://pdfs.semanticscholar.org/f909/d04c809013b930bafca12c0f9a8192df9d92.pdf,Single Image Subspace for Face Recognition,2007 +68,FERET,feret,22.3386304,114.2620337,Hong Kong University of Science and 
Technology,edu,33abfe693258a4e00467494b11ee4d523379ab6b,citation,http://www.cse.ust.hk/~dyyeung/paper/pdf/yeung.icip2006a.pdf,Local Discriminant Embedding with Tensor Representation,2006 +69,FERET,feret,38.0333742,-84.5017758,University of Kentucky,edu,a1997d89f544cc862c63a972ef364b2ff38982e9,citation,https://pdfs.semanticscholar.org/a199/7d89f544cc862c63a972ef364b2ff38982e9.pdf,Can SNOMED CT Changes Be Used as a Surrogate Standard for Evaluating the Performance of Its Auditing Methods?,2017 +70,FERET,feret,-33.3578899,151.37834708,University of Newcastle,edu,aeb64f88302b9d4d23ee13ece5c9842dd43dc37f,citation,https://pdfs.semanticscholar.org/aeb6/4f88302b9d4d23ee13ece5c9842dd43dc37f.pdf,Recollection and confidence in two-alternative forced choice episodic recognition,2009 +71,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,e392816ec3e0b131bbab06431ac85b14afa7d656,citation,http://pdfs.semanticscholar.org/e392/816ec3e0b131bbab06431ac85b14afa7d656.pdf,A Simple and Efficient Supervised Method for Spatially Weighted PCA in Face Image Analysis,2010 +72,FERET,feret,34.1235825,108.83546,Xidian University,edu,3e76496aa3840bca2974d6d087bfa4267a390768,citation,https://pdfs.semanticscholar.org/3e76/496aa3840bca2974d6d087bfa4267a390768.pdf,Dictionary Learning in Optimal Metric Subspace,2018 +73,FERET,feret,39.9808333,116.34101249,Beihang University,edu,3e76496aa3840bca2974d6d087bfa4267a390768,citation,https://pdfs.semanticscholar.org/3e76/496aa3840bca2974d6d087bfa4267a390768.pdf,Dictionary Learning in Optimal Metric Subspace,2018 +74,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,355af3c3adbb17d25f0d2a4193e3daadffc0d4e8,citation,http://pdfs.semanticscholar.org/355a/f3c3adbb17d25f0d2a4193e3daadffc0d4e8.pdf,Pattern recognition: Historical perspective and future directions,2000 +75,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,355af3c3adbb17d25f0d2a4193e3daadffc0d4e8,citation,http://pdfs.semanticscholar.org/355a/f3c3adbb17d25f0d2a4193e3daadffc0d4e8.pdf,Pattern recognition: Historical perspective and future directions,2000 +76,FERET,feret,40.8927159,29.37863323,Sabanci University,edu,1e6d1e811da743df02481bca1a7bdaa73b809913,citation,http://research.sabanciuniv.edu/608/1/3011800001159.pdf,Multimodal person recognition for human-vehicle interaction,2006 +77,FERET,feret,50.7338124,7.1022465,University of Bonn,edu,f4aafb50c93c5ad3e5c4696ed24b063a1932915a,citation,http://pdfs.semanticscholar.org/f4aa/fb50c93c5ad3e5c4696ed24b063a1932915a.pdf,What would you look like in Springfield? 
Linear Transformations between High-Dimensional Spaces,2011 +78,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,10156890bc53cb6be97bd144a68fde693bf13612,citation,http://pdfs.semanticscholar.org/1015/6890bc53cb6be97bd144a68fde693bf13612.pdf,Face Recognition Using Sparse Representation-Based Classification on K-Nearest Subspace,2013 +79,FERET,feret,45.42580475,-75.68740118,University of Ottawa,edu,16820ccfb626dcdc893cc7735784aed9f63cbb70,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf,Real-time embedded age and gender classification in unconstrained video,2015 +80,FERET,feret,42.3504253,-71.10056114,Boston University,edu,966b76acfa75253679b1a82ecc5a68e523f5c0c9,citation,http://pdfs.semanticscholar.org/f204/2494d5666e436f5e96ff5e0cd3b5f5e5485b.pdf,Preference suppression caused by misattribution of task-irrelevant subliminal motion.,2012 +81,FERET,feret,40.8419836,-73.94368971,Columbia University,edu,0c7f27d23a162d4f3896325d147f412c40160b52,citation,http://pdfs.semanticscholar.org/0c7f/27d23a162d4f3896325d147f412c40160b52.pdf,Models and Algorithms for Vision through the Atmosphere,2003 +82,FERET,feret,40.47913175,-74.43168868,Rutgers University,edu,6069b4bc1a21341b77b49f01341c238c770d52e0,citation,http://pdfs.semanticscholar.org/b02b/50ed995fe526208b1577b9d7ef6262bf3ecf.pdf,Comparing Kernel-based Learning Methods for Face Recognition,2003 +83,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,af31ef1e81c1132f186d7aebb141d7f59a815010,citation,http://cas.ee.ic.ac.uk/people/ccb98/papers/LiuGlobalSIP13.pdf,Domain-specific progressive sampling of face images,2013 +84,FERET,feret,51.5073219,-0.1276474,"London, United Kingdom",edu,af31ef1e81c1132f186d7aebb141d7f59a815010,citation,http://cas.ee.ic.ac.uk/people/ccb98/papers/LiuGlobalSIP13.pdf,Domain-specific progressive sampling of face images,2013 +85,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,07f31bef7a7035792e3791473b3c58d03928abbf,citation,https://doi.org/10.1016/j.imavis.2016.08.004,Lessons from collecting a million biometric samples,2015 +86,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,07f31bef7a7035792e3791473b3c58d03928abbf,citation,https://doi.org/10.1016/j.imavis.2016.08.004,Lessons from collecting a million biometric samples,2015 +87,FERET,feret,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,d275714c323dd4e400e8003fa8c33070f8ea03d1,citation,https://pdfs.semanticscholar.org/d275/714c323dd4e400e8003fa8c33070f8ea03d1.pdf,"White Fear, Dehumanization and Low Empathy: a Lethal Combination for Shooting Biases by Yara Mekawi",2014 +88,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,1a5a79b4937b89420049bc279a7b7f765d143881,citation,http://pdfs.semanticscholar.org/1a5a/79b4937b89420049bc279a7b7f765d143881.pdf,Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,2018 +89,FERET,feret,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,1a5a79b4937b89420049bc279a7b7f765d143881,citation,http://pdfs.semanticscholar.org/1a5a/79b4937b89420049bc279a7b7f765d143881.pdf,Are Rich People Perceived as More Trustworthy? 
Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,2018 +90,FERET,feret,37.548215,-77.45306424,Virginia Commonwealth University,edu,1a5a79b4937b89420049bc279a7b7f765d143881,citation,http://pdfs.semanticscholar.org/1a5a/79b4937b89420049bc279a7b7f765d143881.pdf,Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,2018 +91,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,88ee6d0b8342852a5bd55864dc7a1c8452c10bbf,citation,http://pdfs.semanticscholar.org/88ee/6d0b8342852a5bd55864dc7a1c8452c10bbf.pdf,Support Vector Machines Applied to Face Recognition,1998 +92,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,59f83e94a7f52cbb728d434426f6fe85f756259c,citation,https://pdfs.semanticscholar.org/59f8/3e94a7f52cbb728d434426f6fe85f756259c.pdf,An Improved Illumination Normalization Approach based on Wavelet Tranform for Face Recognition from Single Training Image Per Person,2010 +93,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,5d1c4e93e32ee686234c5aae7f38025523993c8c,citation,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d539.pdf,Towards Pose Robust Face Recognition,2013 +94,FERET,feret,34.8452999,48.5596212,Islamic Azad University,edu,53ce84598052308b86ba79d873082853022aa7e9,citation,https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf,Optimized Method for Real-Time Face Recognition System Based on PCA and Multiclass Support Vector Machine,2013 +95,FERET,feret,22.5611537,88.41310194,Jadavpur University,edu,eef05b87f1a62bf658fc622427187eab4fb0f7a5,citation,http://pdfs.semanticscholar.org/eef0/5b87f1a62bf658fc622427187eab4fb0f7a5.pdf,High Performance Human Face Recognition using Independent High Intensity Gabor Wavelet Responses: A Statistical Approach,2011 +96,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,f5c285c3729188884f448db3cc60647f15e289d3,citation,http://pdfs.semanticscholar.org/f5c2/85c3729188884f448db3cc60647f15e289d3.pdf,Sorted Index Numbers for Privacy Preserving Face Recognition,2009 +97,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,45a3ba54fc2210cf8a4fba0cbdce9dad3cefc826,citation,http://pdfs.semanticscholar.org/45a3/ba54fc2210cf8a4fba0cbdce9dad3cefc826.pdf,Complete Cross-Validation for Nearest Neighbor Classifiers,2000 +98,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,71e942e05f73b163a7ec814a85ff4131cb48f650,citation,http://pdfs.semanticscholar.org/8f83/e1a0c05da3a2f316b75b4a178fadf709dd68.pdf,The BANCA Database and Evaluation Protocol,2003 +99,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,1fe0c5562c8dffecc0cadeef2c592bfa6e89b5ca,citation,http://cs.boisestate.edu/~dxu/publications/ICTAI04.pdf,Illumination invariant face recognition based on neural network ensemble,2004 +100,FERET,feret,46.897155,-96.81827603,North Dakota State University,edu,1fe0c5562c8dffecc0cadeef2c592bfa6e89b5ca,citation,http://cs.boisestate.edu/~dxu/publications/ICTAI04.pdf,Illumination invariant face recognition based on neural network ensemble,2004 +101,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,58da4e59c4d259196fc6bd807bc8c36636efa4ef,citation,http://pdfs.semanticscholar.org/58da/4e59c4d259196fc6bd807bc8c36636efa4ef.pdf,Symmetrical PCA in face recognition,2002 +102,FERET,feret,32.0565957,118.77408833,Nanjing 
University,edu,c901524f01c7a0db3bb01afa1d5828913c84628a,citation,https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/jcst06.pdf,Image Region Selection and Ensemble for Face Recognition,2006 +103,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,221c9fff1c25368a6b72ca679c67a3d6b35e2c00,citation,http://pdfs.semanticscholar.org/5ccb/f66733438ab42fe2da66ad1d37635f4391de.pdf,Memory-Based Face Recognition for Visitor Identification,2000 +104,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,fc798314994bf94d1cde8d615ba4d5e61b6268b6,citation,http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf,"Face Recognition : face in video , age invariance , and facial marks",2009 +105,FERET,feret,-34.40505545,150.87834655,University of Wollongong,edu,a3bc6020cd57ebe3a82a0b232f969bcc4e372e53,citation,http://pdfs.semanticscholar.org/a3bc/6020cd57ebe3a82a0b232f969bcc4e372e53.pdf,A Hybrid Feature Extraction Technique for Face Recognition,2014 +106,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,13d591220f9fdb22d81c2438a008c80843b61fd4,citation,https://pdfs.semanticscholar.org/13d5/91220f9fdb22d81c2438a008c80843b61fd4.pdf,Boosting Multi-gabor Subspaces for Face Recognition,2006 +107,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,13d591220f9fdb22d81c2438a008c80843b61fd4,citation,https://pdfs.semanticscholar.org/13d5/91220f9fdb22d81c2438a008c80843b61fd4.pdf,Boosting Multi-gabor Subspaces for Face Recognition,2006 +108,FERET,feret,-27.49741805,153.01316956,University of Queensland,edu,621e8882c41cdaf03a2c4a986a6404f0272ba511,citation,https://doi.org/10.1109/IJCNN.2012.6252611,On robust biometric identity verification via sparse encoding of faces: Holistic vs local approaches,2012 +109,FERET,feret,52.2380139,6.8566761,University of Twente,edu,8780f14d04671d4f2ed50307d16062d72cc51863,citation,http://pdfs.semanticscholar.org/8780/f14d04671d4f2ed50307d16062d72cc51863.pdf,Likelihood Ratio-Based Detection of Facial Features,2000 +110,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,7a52eb0886892c04c6c80b78795d880a70796cb6,citation,http://www.cs.toronto.edu/~jepson/papers/ChennubhotlaJepsonICPR2004.pdf,Perceptual distance normalization for appearance detection,2004 +111,FERET,feret,65.0592157,25.46632601,University of Oulu,edu,1fe121925668743762ce9f6e157081e087171f4c,citation,https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf,Unsupervised learning of overcomplete face descriptors,2015 +112,FERET,feret,29.5084174,106.57858552,Chongqing University,edu,f3cb97791ded4a5c3bed717f820215a1c9648226,citation,http://pdfs.semanticscholar.org/f3cb/97791ded4a5c3bed717f820215a1c9648226.pdf,Multi-scale Block Weber Local Descriptor for Face Recognition,2015 +113,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,d28d697b578867500632b35b1b19d3d76698f4a9,citation,http://pdfs.semanticscholar.org/d28d/697b578867500632b35b1b19d3d76698f4a9.pdf,Face Recognition Using Shape and Texture,1999 +114,FERET,feret,58.38131405,26.72078081,University of Tartu,edu,5a5ae31263517355d15b7b09d74cb03e40093046,citation,http://pdfs.semanticscholar.org/5a5a/e31263517355d15b7b09d74cb03e40093046.pdf,Super Resolution and Face Recognition Based People Activity Monitoring Enhancement Using Surveillance Camera,2016 +115,FERET,feret,32.0565957,118.77408833,Nanjing 
University,edu,82524c49ea20390c711e0606e50570ac2183c281,citation,http://pdfs.semanticscholar.org/8252/4c49ea20390c711e0606e50570ac2183c281.pdf,(2D)PCA: 2-Directional 2-Dimensional PCA for Efficient Face Representation and Recognition,2005 +116,FERET,feret,38.99203005,-76.9461029,University of Maryland College Park,edu,b13a882e6168afc4058fe14cc075c7e41434f43e,citation,http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf,Recognition of Humans and Their Activities Using Video,2005 +117,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,b13a882e6168afc4058fe14cc075c7e41434f43e,citation,http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf,Recognition of Humans and Their Activities Using Video,2005 +118,FERET,feret,32.9820799,-96.7566278,University of Texas at Dallas,edu,ac9516a589901f1421e8ce905dd8bc5b689317ca,citation,http://pdfs.semanticscholar.org/ac95/16a589901f1421e8ce905dd8bc5b689317ca.pdf,A Practical Framework for Executing Complex Queries over Encrypted Multimedia Data,2016 +119,FERET,feret,42.357757,-83.06286711,Wayne State University,edu,cd0503a31a9f9040736ccfb24086dc934508cfc7,citation,http://pdfs.semanticscholar.org/cd05/03a31a9f9040736ccfb24086dc934508cfc7.pdf,Maximizing Resource Utilization In Video Streaming Systems,2015 +120,FERET,feret,47.5612651,7.5752961,University of Basel,edu,183c10b7d9ff26576e13a6639de0f7af206ed058,citation,http://gravis.cs.unibas.ch/publications/CVPR05_Blanz.pdf,Face recognition based on frontal views generated from non-frontal images,2005 +121,FERET,feret,1.3461952,103.6815499,"Nanyang Technological University, Singapore",edu,96d34c1a749e74af0050004162d9dc5132098a79,citation,https://doi.org/10.1109/TNN.2005.844909,High-speed face recognition based on discrete cosine transform and RBF neural networks,2005 +122,FERET,feret,41.10427915,29.02231159,Istanbul Technical University,edu,559645d2447004355c83737a19c9a811b45780f1,citation,https://doi.org/10.1109/ICB.2015.7139114,Combining view-based pose normalization and feature transform for cross-pose face recognition,2015 +123,FERET,feret,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,559645d2447004355c83737a19c9a811b45780f1,citation,https://doi.org/10.1109/ICB.2015.7139114,Combining view-based pose normalization and feature transform for cross-pose face recognition,2015 +124,FERET,feret,46.5184121,6.5684654,École Polytechnique Fédérale de Lausanne,edu,559645d2447004355c83737a19c9a811b45780f1,citation,https://doi.org/10.1109/ICB.2015.7139114,Combining view-based pose normalization and feature transform for cross-pose face recognition,2015 +125,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,bc866c2ced533252f29cf2111dd71a6d1724bd49,citation,http://pdfs.semanticscholar.org/bc86/6c2ced533252f29cf2111dd71a6d1724bd49.pdf,A Multi-Modal Face Recognition Method Using Complete Local Derivative Patterns and Depth Maps,2014 +126,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,63a584487beb7382cad8ed70020f108ded5bf076,citation,https://pdfs.semanticscholar.org/2bb3/4f45b1f0ae2b602a6f25f1966cd0f84e3f5f.pdf,Face Detection and Modeling for Recognition,2002 +127,FERET,feret,22.5611537,88.41310194,Jadavpur University,edu,5e6c23d2e2f92a90bd35bdbc937b2d7d95ee2d55,citation,http://pdfs.semanticscholar.org/5e6c/23d2e2f92a90bd35bdbc937b2d7d95ee2d55.pdf,Fusion of Wavelet Coefficients from Visual and Thermal Face Images for Human Face Recognition - A Comparative Study,2006 +128,FERET,feret,40.0044795,116.370238,Chinese Academy of 
Sciences,edu,c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,citation,http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf,Locality-Constrained Active Appearance Model,2012 +129,FERET,feret,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,citation,http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf,Locality-Constrained Active Appearance Model,2012 +130,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,4d15254f6f31356963cc70319ce416d28d8924a3,citation,http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf,Quo vadis Face Recognition?,2001 +131,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,4d15254f6f31356963cc70319ce416d28d8924a3,citation,http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf,Quo vadis Face Recognition?,2001 +132,FERET,feret,40.44415295,-79.96243993,University of Pittsburgh,edu,4d15254f6f31356963cc70319ce416d28d8924a3,citation,http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf,Quo vadis Face Recognition?,2001 +133,FERET,feret,33.776033,-84.39884086,Georgia Institute of Technology,edu,1dad684de1ce4c013ba04eb4b1a70355b3786ecd,citation,http://pdfs.semanticscholar.org/933d/06908b782279b1127c9ba498d868b26ffe8e.pdf,Computers Seeing People,1999 +134,FERET,feret,22.5611537,88.41310194,Jadavpur University,edu,52909a123ba3b088a5a93d930dcd029ec2f1f24f,citation,http://pdfs.semanticscholar.org/5d05/a0deec42a061541bbd399bc9e40d4ad3374a.pdf,A Gabor-Block-Based Kernel Discriminative Common Vector Approach Using Cosine Kernels for Human Face Recognition,2012 +135,FERET,feret,35.14479945,33.90492318,Eastern Mediterranean University,edu,b374391ab793a1bb2ecde4df51be9d97c2cbf79a,citation,https://pdfs.semanticscholar.org/b374/391ab793a1bb2ecde4df51be9d97c2cbf79a.pdf,Improved PCA based Face Recognition using Feature based Classifier Ensemble,2015 +136,FERET,feret,42.3573046,-71.0582415,"Affectiva, Inc.",company,d10cfcf206b0991e3bc20ac28df1f61c63516f30,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776,Smile or smirk? 
Automatic detection of spontaneous asymmetric smiles to understand viewer experience,2013 +137,FERET,feret,32.8536333,-117.2035286,Kyung Hee University,edu,bf0836e5c10add0b13005990ba019a9c4b744b06,citation,https://doi.org/10.1109/TCE.2009.5373791,An enhanced independent component-based human facial expression recognition from video,2009 +138,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,051f03bc25ec633592aa2ff5db1d416b705eac6c,citation,http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf,Partial face recognition: An alignment free approach,2011 +139,FERET,feret,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,aba31184783150c723805831cde0f22fe257b835,citation,http://pdfs.semanticscholar.org/aba3/1184783150c723805831cde0f22fe257b835.pdf,Contribution of Non-scrambled Chroma Information in Privacy-Protected Face Images to Privacy Leakage,2011 +140,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,aba31184783150c723805831cde0f22fe257b835,citation,http://pdfs.semanticscholar.org/aba3/1184783150c723805831cde0f22fe257b835.pdf,Contribution of Non-scrambled Chroma Information in Privacy-Protected Face Images to Privacy Leakage,2011 +141,FERET,feret,40.47913175,-74.43168868,Rutgers University,edu,7ef41e2be5116912fe8a4906b4fb89ac9dcf819d,citation,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334492,A hybrid face recognition method using Markov random fields,2004 +142,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,757e4cb981e807d83539d9982ad325331cb59b16,citation,http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf,Demographics versus Biometric Automatic Interoperability,2013 +143,FERET,feret,41.9037626,12.5144384,Sapienza University of Rome,edu,757e4cb981e807d83539d9982ad325331cb59b16,citation,http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf,Demographics versus Biometric Automatic Interoperability,2013 +144,FERET,feret,49.2622421,-123.2450052,University of Perugia,edu,67c08e2b8b918a61dcbd0d4c63a74b89b833d259,citation,http://pdfs.semanticscholar.org/67c0/8e2b8b918a61dcbd0d4c63a74b89b833d259.pdf,Multi-class texture analysis in colorectal cancer histology,2016 +145,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,ac942c4870e55fe1d9822d62edcdb685d41cd2bf,citation,http://pdfs.semanticscholar.org/ac94/2c4870e55fe1d9822d62edcdb685d41cd2bf.pdf,Pose Discriminiation and Eye Detection Using Support Vector Machines (SVM),1998 +146,FERET,feret,44.97308605,-93.23708813,University of Minnesota,edu,ac942c4870e55fe1d9822d62edcdb685d41cd2bf,citation,http://pdfs.semanticscholar.org/ac94/2c4870e55fe1d9822d62edcdb685d41cd2bf.pdf,Pose Discriminiation and Eye Detection Using Support Vector Machines (SVM),1998 +147,FERET,feret,51.4584837,-2.6097752,University of Bristol,edu,a632ebe6f1e7d9b2b652b0186abef8db218037f3,citation,http://pdfs.semanticscholar.org/a632/ebe6f1e7d9b2b652b0186abef8db218037f3.pdf,Subliminally and Supraliminally Acquired Long-Term Memories Jointly Bias Delayed Decisions,2017 +148,FERET,feret,32.8536333,-117.2035286,Kyung Hee University,edu,027f769aed0cfcb3169ef60f182ce1decc0e99eb,citation,http://www.ijicic.org/10-12018-1.pdf,Local Directional Pattern (LDP) for face recognition,2010 +149,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,edd6ed94207ab614c71ac0591d304a708d708e7b,citation,http://doi.org/10.1016/j.neucom.2012.02.001,Reconstructive discriminant analysis: A feature extraction method 
induced from linear regression classification,2012 +150,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,5dbf772b98cb944befa9cf01ec5d15da713a338b,citation,http://pdfs.semanticscholar.org/9d82/44d5a32ecc314860c1d673d687df28f77d84.pdf,Face modeling for recognition,2001 +151,FERET,feret,32.1119889,34.80459702,Tel Aviv University,edu,8356b642e4e9bb39bd26ea6c4b9bad21bd9b1912,citation,http://pdfs.semanticscholar.org/8356/b642e4e9bb39bd26ea6c4b9bad21bd9b1912.pdf,Seeing People in the Dark: Face Recognition in Infrared Images,2002 +152,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,citation,http://pdfs.semanticscholar.org/2b73/e3d541b0208ae54b3920fef4bfd9fd0c84a7.pdf,Feature-based face representations and image reconstruction from behavioral and neural data.,2016 +153,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,citation,http://pdfs.semanticscholar.org/2b73/e3d541b0208ae54b3920fef4bfd9fd0c84a7.pdf,Feature-based face representations and image reconstruction from behavioral and neural data.,2016 +154,FERET,feret,42.3583961,-71.09567788,MIT,edu,2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,citation,http://pdfs.semanticscholar.org/2b73/e3d541b0208ae54b3920fef4bfd9fd0c84a7.pdf,Feature-based face representations and image reconstruction from behavioral and neural data.,2016 +155,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,1e8d0998c69caf6e9495db1d6df562f8b9e90003,citation,http://pdfs.semanticscholar.org/1e8d/0998c69caf6e9495db1d6df562f8b9e90003.pdf,Solving the Small Sample Size Problem of LDA,2002 +156,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,99b93f67c3b2b0a474bf5670a7dd40a6a0e849ac,citation,http://pdfs.semanticscholar.org/99b9/3f67c3b2b0a474bf5670a7dd40a6a0e849ac.pdf,NIMBLER: A Model of Visual Attention and Object Recognition With a Biologically Plausible Retina,2007 +157,FERET,feret,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,9729930ab0f9cbcd07f1105bc69c540330cda50a,citation,https://doi.org/10.1109/ACCESS.2017.2749331,Compressing Fisher Vector for Robust Face Recognition,2017 +158,FERET,feret,31.32235655,121.38400941,Shanghai University,edu,459eb3cfd9b52a0d416571e4bc4e75f979f4b901,citation,https://doi.org/10.1109/ROBIO.2015.7418998,Vision development of humanoid head robot SHFR-III,2015 +159,FERET,feret,34.8452999,48.5596212,Islamic Azad University,edu,14b2dff604f148c4e5b54aa25fbecbf7f9071205,citation,http://www.iranprc.org/pdf/paper/2011-06.pdf,A new preselection method for face recognition in JPEG domain based on face segmentation,2011 +160,FERET,feret,47.5612651,7.5752961,University of Basel,edu,ff47698be7313005d0ea0fe0cc72c13f2f4b092a,citation,http://pdfs.semanticscholar.org/ff47/698be7313005d0ea0fe0cc72c13f2f4b092a.pdf,Caring or daring? 
Exploring the impact of facial masculinity/femininity and gender category information on first impressions,2017 +161,FERET,feret,35.84658875,127.1350133,Chonbuk National University,edu,0c6a18b0cee01038eb1f9373c369835b236373ae,citation,https://doi.org/10.1007/s11042-017-4359-9,Learning warps based similarity for pose-unconstrained face recognition,2017 +162,FERET,feret,40.72925325,-73.99625394,New York University,edu,54e6343f4368d9e5468c3e83b6eeb3a58a3c7555,citation,http://pdfs.semanticscholar.org/54e6/343f4368d9e5468c3e83b6eeb3a58a3c7555.pdf,Reconstructing Perceived and Retrieved Faces from Activity Patterns in Lateral Parietal Cortex.,2016 +163,FERET,feret,38.8833413,-77.1045977,DARPA,mil,00d6e5a1b347463f6aeb08a10cd912273c9d1347,citation,http://pdfs.semanticscholar.org/00d6/e5a1b347463f6aeb08a10cd912273c9d1347.pdf,Face Recognition Vendor Test 2002 : Evaluation Report,2003 +164,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,00d6e5a1b347463f6aeb08a10cd912273c9d1347,citation,http://pdfs.semanticscholar.org/00d6/e5a1b347463f6aeb08a10cd912273c9d1347.pdf,Face Recognition Vendor Test 2002 : Evaluation Report,2003 +165,FERET,feret,41.5007811,2.11143663,Universitat Autònoma de Barcelona,edu,dc964b9c7242a985eb255b2410a9c45981c2f4d0,citation,http://doi.org/10.1007/s10851-018-0837-6,Feature Extraction by Using Dual-Generalized Discriminative Common Vectors,2018 +166,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,8023864256a1a4a26e130a7165f3d70875c27467,citation,http://pdfs.semanticscholar.org/8023/864256a1a4a26e130a7165f3d70875c27467.pdf,LUT-Based Adaboost for Gender Classification,2003 +167,FERET,feret,47.05821,15.46019568,Graz University of Technology,edu,2a77e3221d0512aa5674cf6f9041c1ce81fc07f0,citation,http://pdfs.semanticscholar.org/65de/08bab21921fba39e97f0bc3585f62cb2bd5d.pdf,An Automatic Hybrid Segmentation Approach for Aligned Face Portrait Images,2009 +168,FERET,feret,51.5231607,-0.1282037,University College London,edu,aff92784567095ee526a705e21be4f42226bbaab,citation,http://pdfs.semanticscholar.org/aff9/2784567095ee526a705e21be4f42226bbaab.pdf,Face recognition in uncontrolled environments,2015 +169,FERET,feret,47.79475945,13.05417525,University of Salzburg,edu,a7d7fba176e442f60899c57b976ae6de6d013ceb,citation,http://pdfs.semanticscholar.org/a7d7/fba176e442f60899c57b976ae6de6d013ceb.pdf,Gender differences in experiential and facial reactivity to approval and disapproval during emotional social interactions,2015 +170,FERET,feret,52.3553655,4.9501644,University of Amsterdam,edu,a7d7fba176e442f60899c57b976ae6de6d013ceb,citation,http://pdfs.semanticscholar.org/a7d7/fba176e442f60899c57b976ae6de6d013ceb.pdf,Gender differences in experiential and facial reactivity to approval and disapproval during emotional social interactions,2015 +171,FERET,feret,35.6902784,139.69540096,Kogakuin University,edu,ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba,citation,http://pdfs.semanticscholar.org/ca50/b25eaad0c9146fc5a4a2cd4c472c77b970ba.pdf,Face Recognition Using Histogram-based Features in Spatial and Frequency Domains,2013 +172,FERET,feret,38.2530945,140.8736593,Tohoku University,edu,ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba,citation,http://pdfs.semanticscholar.org/ca50/b25eaad0c9146fc5a4a2cd4c472c77b970ba.pdf,Face Recognition Using Histogram-based Features in Spatial and Frequency Domains,2013 +173,FERET,feret,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,79dc9a1aa2ab7fa46e8024bd654a4a5776c1a6d6,citation,http://mmlab.siat.ac.cn/sfchen-old/Publications/ICIA11-3Dtracking.pdf,Robust non-rigid 3D tracking for face recognition in real-world videos,2011 +174,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,79dc9a1aa2ab7fa46e8024bd654a4a5776c1a6d6,citation,http://mmlab.siat.ac.cn/sfchen-old/Publications/ICIA11-3Dtracking.pdf,Robust non-rigid 3D tracking for face recognition in real-world videos,2011 +175,FERET,feret,43.7743911,-79.50481085,York University,edu,ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,citation,https://arxiv.org/pdf/1706.04277.pdf,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,2017 +176,FERET,feret,27.18794105,31.17009498,Assiut University,edu,ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,citation,https://arxiv.org/pdf/1706.04277.pdf,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,2017 +177,FERET,feret,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +178,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,citation,http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf,A Study on Illumination Invariant Face Recognition Methods Based on Multiple Eigenspaces,2005 +179,FERET,feret,46.897155,-96.81827603,North Dakota State University,edu,f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,citation,http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf,A Study on Illumination Invariant Face Recognition Methods Based on Multiple Eigenspaces,2005 +180,FERET,feret,33.776033,-84.39884086,Georgia Institute of Technology,edu,933d06908b782279b1127c9ba498d868b26ffe8e,citation,https://pdfs.semanticscholar.org/933d/06908b782279b1127c9ba498d868b26ffe8e.pdf,Computers Seeing People,1999 +181,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,9e31e77f9543ab42474ba4e9330676e18c242e72,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +182,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,9e31e77f9543ab42474ba4e9330676e18c242e72,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +183,FERET,feret,65.0592157,25.46632601,University of Oulu,edu,769d1a0aff0cf7842c7861d30ce654a029d6b467,citation,http://pdfs.semanticscholar.org/769d/1a0aff0cf7842c7861d30ce654a029d6b467.pdf,Descriptor Learning Based on Fisher Separation Criterion for Texture Classification,2010 +184,FERET,feret,31.30104395,121.50045497,Fudan University,edu,380862d22617064ffab1a3b42f0b11752d6bd785,citation,http://pdfs.semanticscholar.org/3808/62d22617064ffab1a3b42f0b11752d6bd785.pdf,Recognition from a Single Sample per Person with Multiple SOM Fusion,2006 +185,FERET,feret,41.98676415,20.96254516,South East European University,edu,f2cc459ada3abd9d8aa82e92710676973aeff275,citation,http://ieeexplore.ieee.org/document/5967185/,Object class recognition using range of multiple computer vision algorithms,2011 +186,FERET,feret,49.25839375,-123.24658161,University of British Columbia,edu,4bc55ffc2f53801267ca1767028515be6e84f551,citation,http://pdfs.semanticscholar.org/4bc5/5ffc2f53801267ca1767028515be6e84f551.pdf,The Decision to Engage Cognitive Control Is Driven by Expected 
Reward-Value: Neural and Behavioral Evidence,2012 +187,FERET,feret,28.54632595,77.27325504,Indian Institute of Technology Delhi,edu,0fae5d9d2764a8d6ea691b9835d497dd680bbccd,citation,http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf,Face Recognition using Canonical Correlation Analysis,2006 +188,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,71ed20748c919cd261024b146992ced4c9c2157b,citation,http://mmlab.ie.cuhk.edu.hk/archive/2006/01640756.pdf,Learning Semantic Patterns with Discriminant Localized Binary Projections,2006 +189,FERET,feret,39.977217,116.337632,Microsoft Research Asia,company,71ed20748c919cd261024b146992ced4c9c2157b,citation,http://mmlab.ie.cuhk.edu.hk/archive/2006/01640756.pdf,Learning Semantic Patterns with Discriminant Localized Binary Projections,2006 +190,FERET,feret,40.11571585,-88.22750772,Beckman Institute,edu,71ed20748c919cd261024b146992ced4c9c2157b,citation,http://mmlab.ie.cuhk.edu.hk/archive/2006/01640756.pdf,Learning Semantic Patterns with Discriminant Localized Binary Projections,2006 +191,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,9103148dd87e6ff9fba28509f3b265e1873166c9,citation,http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf,Face Analysis using 3D Morphable Models,2015 +192,FERET,feret,33.30715065,-111.67653157,Arizona State University,edu,80cef64706957c53a31b67045d208efe39205c9e,citation,http://pdfs.semanticscholar.org/80ce/f64706957c53a31b67045d208efe39205c9e.pdf,Deficits in other-race face recognition: no evidence for encoding-based effects.,2009 +193,FERET,feret,51.44415765,7.26096541,Ruhr-University Bochum,edu,ce0aa94c79f60c35073f434a7fd6987180f81527,citation,http://pdfs.semanticscholar.org/ce0a/a94c79f60c35073f434a7fd6987180f81527.pdf,Achieving Anonymity against Major Face Recognition Algorithms,2013 +194,FERET,feret,-33.3578899,151.37834708,University of Newcastle,edu,eb6f15c59e6f2ffaa9a0a55d3f045c23a5a6d275,citation,http://pdfs.semanticscholar.org/eb6f/15c59e6f2ffaa9a0a55d3f045c23a5a6d275.pdf,State-Trace Analysis of the Face Inversion Effect,2009 +195,FERET,feret,51.5231607,-0.1282037,University College London,edu,db3e78704df982b2af92282e4a74aa3b59ea3a2e,citation,https://pdfs.semanticscholar.org/1e69/9d9e0470c5d39ff78eaf21b394a90691c513.pdf,A recurrent dynamic model for correspondence-based face recognition.,2008 +196,FERET,feret,38.2530945,140.8736593,Tohoku University,edu,7589bded8fed54d6eb7800d24ace662b37ed0779,citation,https://pdfs.semanticscholar.org/7589/bded8fed54d6eb7800d24ace662b37ed0779.pdf,Face Recognition Algorithm Using Muti-direction Markov Stationary Features and Adjacent Pixel Intensity Difference Quantization Histogram,2012 +197,FERET,feret,24.18005755,120.64836072,Feng Chia University,edu,344a5802999dddd0a6d1c4d511910af2eb922231,citation,http://pdfs.semanticscholar.org/f0ba/552418698d1b881c6f9f02e2c84f969e66f3.pdf,DroneFace: An Open Dataset for Drone Research,2017 +198,FERET,feret,65.0592157,25.46632601,University of Oulu,edu,7c87f445a15597f603756587e0f9b8cf4d942ecc,citation,http://pdfs.semanticscholar.org/7c87/f445a15597f603756587e0f9b8cf4d942ecc.pdf,Analysis of Sampling Techniques for Learning Binarized Statistical Image Features Using Fixations and Salience,2014 +199,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,04e06481e455c6eb838c22e8505dafc01b7d0cfa,citation,http://mmlab.ie.cuhk.edu.hk/archive/2008/L1.pdf,L<inf>1</inf> regularized projection pursuit for additive model learning,2008 
+200,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,841855205818d3a6d6f85ec17a22515f4f062882,citation,https://arxiv.org/pdf/1805.11529.pdf,Low Resolution Face Recognition in the Wild,2018 +201,FERET,feret,42.3583961,-71.09567788,MIT,edu,05bd6c2bc5dc6d65c48c6366788441bcfdd9db3a,citation,http://pdfs.semanticscholar.org/05bd/6c2bc5dc6d65c48c6366788441bcfdd9db3a.pdf,Personalizing Smart Environments: Face Recognition for Human Interaction,1999 +202,FERET,feret,51.0784038,-114.1287077,University of Calgary,edu,9902acd6ce7662c93ee2bd41c6c11a6b99ad8754,citation,https://pdfs.semanticscholar.org/9902/acd6ce7662c93ee2bd41c6c11a6b99ad8754.pdf,Robust Multimodal Biometric System using Markov Chain based Rank Level Fusion,2010 +203,FERET,feret,-27.47715625,153.02841004,Queensland University of Technology,edu,6342a4c54835c1e14159495373ab18b4233d2d9b,citation,http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf,Towards Pose-robust Face Recognition on Video,2014 +204,FERET,feret,54.98023235,-1.61452627,Newcastle University,edu,241416b1249d2b71b373f8dcf054110d579a2148,citation,http://pdfs.semanticscholar.org/2414/16b1249d2b71b373f8dcf054110d579a2148.pdf,Biometric face recognition using multilinear projection and artificial intelligence,2013 +205,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,001d909eb3513fb6fad8fb2355971441255458c3,citation,http://mplab.ucsd.edu/wordpress/wp-content/uploads/CVPR2008/Conference/data/papers/023.pdf,Minimal local reconstruction error measure based discriminant feature extraction and classification,2008 +206,FERET,feret,42.357757,-83.06286711,Wayne State University,edu,95d567081510e8e59834febc958668015c174602,citation,http://pdfs.semanticscholar.org/95d5/67081510e8e59834febc958668015c174602.pdf,Combining Gabor features: summing vs. 
voting in human face recognition,2003 +207,FERET,feret,45.7835966,4.7678948,École Centrale de Lyon,edu,e984017c5849ea78e3f50e374a5539770989536d,citation,http://pdfs.semanticscholar.org/e984/017c5849ea78e3f50e374a5539770989536d.pdf,Bilinear Discriminant Analysis for Face Recognition,2005 +208,FERET,feret,39.00041165,-77.10327775,National Institutes of Health,edu,b313751548018e4ecd5ae2ce6b3b94fbd9cae33e,citation,http://doi.org/10.1007/s11263-008-0143-7,Evaluation of Face Datasets as Tools for Assessing the Performance of Face Recognition Methods,2008 +209,FERET,feret,45.5039761,-73.5749687,McGill University,edu,ed9d11e995baeec17c5d2847ec1a8d5449254525,citation,https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf,Efficient Gender Classification Using a Deep LDA-Pruned Net,2017 +210,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,385fa8768d174a9044bc723548a7f8810a62606c,citation,http://pdfs.semanticscholar.org/385f/a8768d174a9044bc723548a7f8810a62606c.pdf,Using an holistic method based on prior information to represent global and local variations on face images,2014 +211,FERET,feret,-35.2776999,149.118527,Australian National University,edu,826f1ac8ef16abd893062fdf5058a09881aed516,citation,https://arxiv.org/pdf/1801.02279.pdf,Identity-Preserving Face Recovery from Portraits,2018 +212,FERET,feret,36.20304395,117.05842113,Tianjin University,edu,1d5219687b9e63767f19cd804147c256c5a5a3bc,citation,https://pdfs.semanticscholar.org/1d52/19687b9e63767f19cd804147c256c5a5a3bc.pdf,Patch-based locality-enhanced collaborative representation for face recognition,2015 +213,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,c423b0a0b7232a5cd0c3f4c75164923a3f04cdcd,citation,http://pdfs.semanticscholar.org/c423/b0a0b7232a5cd0c3f4c75164923a3f04cdcd.pdf,Kernel Discriminant Learning with Application to Face Recognition,2004 +214,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,a1c1970f7c728cc96aea798d65d38df7c9ea61dc,citation,http://pdfs.semanticscholar.org/a1c1/970f7c728cc96aea798d65d38df7c9ea61dc.pdf,Eye Location Using Genetic Algorithm,1999 +215,FERET,feret,47.05821,15.46019568,Graz University of Technology,edu,e121bf6f18e1cb114216a521df63c55030d10fbe,citation,http://pdfs.semanticscholar.org/e121/bf6f18e1cb114216a521df63c55030d10fbe.pdf,Robust Facial Component Detection for Face Alignment Applications,2009 +216,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,bc6011807fadc2d3e6bc97bb2c2ecee5ec1b64a8,citation,http://pdfs.semanticscholar.org/bc60/11807fadc2d3e6bc97bb2c2ecee5ec1b64a8.pdf,Robust Face Recognition from a Single Training Image per Person with Kernel-Based SOM-Face,2004 +217,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,bc6011807fadc2d3e6bc97bb2c2ecee5ec1b64a8,citation,http://pdfs.semanticscholar.org/bc60/11807fadc2d3e6bc97bb2c2ecee5ec1b64a8.pdf,Robust Face Recognition from a Single Training Image per Person with Kernel-Based SOM-Face,2004 +218,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,09ef369754fccb530e658b8331c405867c0d45a6,citation,http://pdfs.semanticscholar.org/09ef/369754fccb530e658b8331c405867c0d45a6.pdf,Comparison of Face Verification Results on the XM2VTS Database,2000 +219,FERET,feret,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,09ef369754fccb530e658b8331c405867c0d45a6,citation,http://pdfs.semanticscholar.org/09ef/369754fccb530e658b8331c405867c0d45a6.pdf,Comparison of Face Verification Results on the XM2VTS Database,2000 
+220,FERET,feret,-33.88890695,151.18943366,University of Sydney,edu,09ef369754fccb530e658b8331c405867c0d45a6,citation,http://pdfs.semanticscholar.org/09ef/369754fccb530e658b8331c405867c0d45a6.pdf,Comparison of Face Verification Results on the XM2VTS Database,2000 +221,FERET,feret,47.3764534,8.54770931,ETH Zürich,edu,09ef369754fccb530e658b8331c405867c0d45a6,citation,http://pdfs.semanticscholar.org/09ef/369754fccb530e658b8331c405867c0d45a6.pdf,Comparison of Face Verification Results on the XM2VTS Database,2000 +222,FERET,feret,22.3386304,114.2620337,Hong Kong University of Science and Technology,edu,ca2e14671f5043dab985dd18e10c5e3f51e2e8be,citation,http://pdfs.semanticscholar.org/ca2e/14671f5043dab985dd18e10c5e3f51e2e8be.pdf,Face Recognition by Using Elongated Local Binary Patterns with Average Maximum Distance Gradient Magnitude,2007 +223,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,edc6d96ae195897b33c07f5fa428149915b4cf6a,citation,http://pdfs.semanticscholar.org/edc6/d96ae195897b33c07f5fa428149915b4cf6a.pdf,Face Pose Estimation System by Combining Hybrid Ica-svm Learning and 3d Modeling,2003 +224,FERET,feret,35.14479945,33.90492318,Eastern Mediterranean University,edu,f65ff9d6d0025f198ac4f924d2f0df121bc51c67,citation,http://pdfs.semanticscholar.org/f65f/f9d6d0025f198ac4f924d2f0df121bc51c67.pdf,Overlapping on Partitioned Facial Images,2006 +225,FERET,feret,47.5612651,7.5752961,University of Basel,edu,916498961a51f56a592c3551b0acc25978571fa7,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126275,Optimal landmark detection using shape models and branch and bound,2011 +226,FERET,feret,35.9023226,14.4834189,University of Malta,edu,4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,citation,https://doi.org/10.1109/TIFS.2017.2788002,"Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning",2018 +227,FERET,feret,24.7246403,46.62335012,King Saud University,edu,e26a7e343fe109e2b52d1eeea5b02dae836f3502,citation,https://doi.org/10.1109/ACCESS.2017.2676238,Facial Expression Recognition Utilizing Local Direction-Based Robust Features and Deep Belief Network,2017 +228,FERET,feret,59.93891665,10.72170765,University of Oslo,edu,e26a7e343fe109e2b52d1eeea5b02dae836f3502,citation,https://doi.org/10.1109/ACCESS.2017.2676238,Facial Expression Recognition Utilizing Local Direction-Based Robust Features and Deep Belief Network,2017 +229,FERET,feret,31.9078499,34.81334092,Weizmann Institute of Science,edu,4cb8a691a15e050756640c0a35880cdd418e2b87,citation,http://www.vision.caltech.edu/~bart/Publications/2004/BartUllmanClassBasedMatching.pdf,Class-Based Matching of Object Parts,2004 +230,FERET,feret,39.4808376,-0.3409522,Universitat Politècnica de València,edu,99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,citation,https://doi.org/10.1109/ACCESS.2017.2752176,A Proposal to Improve the Authentication Process in m-Health Environments,2017 +231,FERET,feret,32.7283683,-97.11201835,University of Texas at Arlington,edu,c2fa83e8a428c03c74148d91f60468089b80c328,citation,http://pdfs.semanticscholar.org/c2fa/83e8a428c03c74148d91f60468089b80c328.pdf,Optimal Mean Robust Principal Component Analysis,2014 +232,FERET,feret,28.0599999,-82.41383619,University of South Florida,edu,1b3e66bef13f114943d460b4f942e941b4761ba2,citation,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=890061,Subspace Approximation of Face Recognition Algorithms: An Empirical Study,2008 +233,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and 
Technology,edu,1b3e66bef13f114943d460b4f942e941b4761ba2,citation,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=890061,Subspace Approximation of Face Recognition Algorithms: An Empirical Study,2008 +234,FERET,feret,28.0599999,-82.41383619,University of South Florida,edu,bdc3546ceee0c2bda9debff7de9aa7d53a03fe7d,citation,https://pdfs.semanticscholar.org/bdc3/546ceee0c2bda9debff7de9aa7d53a03fe7d.pdf,Modeling distance functions induced by face recognition algorithms,2015 +235,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,0fbe38527279f49561c0e1c6ff4e8f733fb79bbe,citation,http://pdfs.semanticscholar.org/7561/b691eb5e9913e4c3cb11caf2738d58b9c896.pdf,Integrating Utility into Face De-identification,2005 +236,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,90ea3a35e946af97372c3f32a170b179fe8352aa,citation,http://pdfs.semanticscholar.org/90ea/3a35e946af97372c3f32a170b179fe8352aa.pdf,Discriminant Learning for Face Recognition,2004 +237,FERET,feret,35.93006535,-84.31240032,Oak Ridge National Laboratory,edu,43a03cbe8b704f31046a5aba05153eb3d6de4142,citation,http://pdfs.semanticscholar.org/9594/3329cd6922a869dd6d58ef01e9492879034c.pdf,Towards Robust Face Recognition from Video,2001 +238,FERET,feret,37.43131385,-122.16936535,Stanford University,edu,cdd2ba6e6436cb5950692702053195a22789d129,citation,https://pdfs.semanticscholar.org/976c/3b5ad438fb0cf2fb157964e8e6f07a09ad9e.pdf,Face-likeness and image variability drive responses in human face-selective ventral regions.,2012 +239,FERET,feret,31.76909325,117.17795091,Anhui University,edu,b910590a0eb191d03e1aedb3d55c905129e92e6b,citation,http://doi.acm.org/10.1145/2808492.2808570,Robust gender classification on unconstrained face images,2015 +240,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,b910590a0eb191d03e1aedb3d55c905129e92e6b,citation,http://doi.acm.org/10.1145/2808492.2808570,Robust gender classification on unconstrained face images,2015 +241,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,dc4089294cb15e071893d24bdf2baa15de5dcb0b,citation,http://www.comm.toronto.edu/~kostas/Publications2008/pub/proceed/105.pdf,Feature selection for subject identification in surveillance photos [face recognition applications],2004 +242,FERET,feret,-33.3578899,151.37834708,University of Newcastle,edu,a80d057099a6ca872508f5d416a8cd67b788506a,citation,https://pdfs.semanticscholar.org/a80d/057099a6ca872508f5d416a8cd67b788506a.pdf,A dissociation between similarity effects in episodic face recognition.,2009 +243,FERET,feret,44.97308605,-93.23708813,University of Minnesota,edu,998cdde7c83a50f0abac69c7c3d20f3729a65d00,citation,https://pdfs.semanticscholar.org/998c/dde7c83a50f0abac69c7c3d20f3729a65d00.pdf,Redundancy effects in the perception and memory of visual objects,2010 +244,FERET,feret,34.66869155,-82.83743476,Clemson University,edu,56c273538a2dbb4cf43c39fa4725592e97ec1681,citation,http://pdfs.semanticscholar.org/56c2/73538a2dbb4cf43c39fa4725592e97ec1681.pdf,Eye Tracking to Enhance Facial Recognition Algorithms,2011 +245,FERET,feret,25.7173339,-80.27866887,University of Miami,edu,c1f07ec629be1c6fe562af0e34b04c54e238dcd1,citation,http://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf,A Novel Facial Feature Localization Method Using Probabilistic-like Output,2004 +246,FERET,feret,37.5600406,126.9369248,Yonsei University,edu,5173a20304ea7baa6bfe97944a5c7a69ea72530f,citation,http://pdfs.semanticscholar.org/5173/a20304ea7baa6bfe97944a5c7a69ea72530f.pdf,Best Basis Selection Method Using 
Learning Weights for Face Recognition,2013 +247,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,83e893858d6a6b8abb07d89e9f821f90c2b074ea,citation,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334677,Facial image retrieval based on demographic classification,2004 +248,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,2d8a84a8e661ce3913cb6c05b18984b14ed11dac,citation,http://pdfs.semanticscholar.org/6fd6/af3864fc5eb62e6328be79bf8174e939efcc.pdf,P3: Toward Privacy-Preserving Photo Sharing,2013 +249,FERET,feret,40.34829285,-74.66308325,Princeton University,edu,643d11703569766bed0a994941ae5f7b3e101659,citation,https://arxiv.org/pdf/1806.06098.pdf,Unsupervised Training for 3D Morphable Model Regression,2018 +250,FERET,feret,42.3619407,-71.0904378,MIT CSAIL,edu,643d11703569766bed0a994941ae5f7b3e101659,citation,https://arxiv.org/pdf/1806.06098.pdf,Unsupervised Training for 3D Morphable Model Regression,2018 +251,FERET,feret,-29.8674219,30.9807272,University of KwaZulu-Natal,edu,fcfb48b19f37e531a56ae95186a214b05c0b94c7,citation,https://pdfs.semanticscholar.org/fcfb/48b19f37e531a56ae95186a214b05c0b94c7.pdf,FACE RECOGNITION WITH EIGENFACES – A DETAILED STUDY,2012 +252,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,8c22dc1b494c4612c4ebc61b22a480666cd841d5,citation,http://pdfs.semanticscholar.org/b95b/9fcccb23be8948e96f0c110aaaedc0f7334a.pdf,Towards Practical Facial Feature Detection,2009 +253,FERET,feret,30.284151,-97.73195598,University of Texas at Austin,edu,8c22dc1b494c4612c4ebc61b22a480666cd841d5,citation,http://pdfs.semanticscholar.org/b95b/9fcccb23be8948e96f0c110aaaedc0f7334a.pdf,Towards Practical Facial Feature Detection,2009 +254,FERET,feret,41.25713055,-72.9896696,Yale University,edu,297c4503a18a959e3a06613d5e7e026ba351b9bf,citation,http://pdfs.semanticscholar.org/297c/4503a18a959e3a06613d5e7e026ba351b9bf.pdf,Neurolaw: Differential brain activity for black and white faces predicts damage awards in hypothetical employment discrimination cases.,2012 +255,FERET,feret,53.21967825,6.56251482,University of Groningen,edu,4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,citation,https://doi.org/10.1109/SSCI.2015.37,Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition,2015 +256,FERET,feret,37.43131385,-122.16936535,Stanford University,edu,90f0e0701b755bbce89cb0e4e3f0a070d49814a0,citation,http://pdfs.semanticscholar.org/90f0/e0701b755bbce89cb0e4e3f0a070d49814a0.pdf,Beyond the retina: Evidence for a face inversion effect in the environmental frame of reference,2011 +257,FERET,feret,24.7925484,120.9951183,National Tsing Hua University,edu,ede16b198b83d04b52dc3f0dafc11fd82c5abac4,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343,LBP edge-mapped descriptor using MGM interest points for face recognition,2017 +258,FERET,feret,42.0551164,-87.67581113,Northwestern University,edu,a6f93435e006328fd0a5dcb7639e771431cc2c37,citation,http://pdfs.semanticscholar.org/c161/7c3c90e4596867d94a00a3a2bb1d55c8843b.pdf,Why Some Faces won't be Remembered: Brain Potentials Illuminate Successful Versus Unsuccessful Encoding for Same-Race and Other-Race Faces,2011 +259,FERET,feret,40.72925325,-73.99625394,New York University,edu,a6f93435e006328fd0a5dcb7639e771431cc2c37,citation,http://pdfs.semanticscholar.org/c161/7c3c90e4596867d94a00a3a2bb1d55c8843b.pdf,Why Some Faces won't be Remembered: Brain Potentials Illuminate Successful Versus Unsuccessful Encoding for Same-Race and Other-Race Faces,2011 
+260,FERET,feret,-34.9189226,138.60423668,University of Adelaide,edu,e2aafdd2f508ee383a0227de9cee00246f251ebf,citation,https://pdfs.semanticscholar.org/c6f0/53bc5dbdcd89cba842251feaa4bb8b91378b.pdf,Face Matching Under Time Pressure and Task Demands,0 +261,FERET,feret,39.9808333,116.34101249,Beihang University,edu,699be9152895977b0b272887320d543c9c7f6157,citation,http://pdfs.semanticscholar.org/699b/e9152895977b0b272887320d543c9c7f6157.pdf,Artistic Illumination Transfer for Portraits,2012 +262,FERET,feret,-29.8674219,30.9807272,University of KwaZulu-Natal,edu,651ea8b030470ab4a70efced154e77028a102713,citation,https://pdfs.semanticscholar.org/651e/a8b030470ab4a70efced154e77028a102713.pdf,Increasing Face Recognition Rate,2016 +263,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,62647a8f8a534db2ccfd0df7d513b4f084231d10,citation,http://pdfs.semanticscholar.org/6264/7a8f8a534db2ccfd0df7d513b4f084231d10.pdf,Weighted SOM-Face: Selecting Local Features for Recognition from Individual Face Image,2005 +264,FERET,feret,31.30104395,121.50045497,Fudan University,edu,62647a8f8a534db2ccfd0df7d513b4f084231d10,citation,http://pdfs.semanticscholar.org/6264/7a8f8a534db2ccfd0df7d513b4f084231d10.pdf,Weighted SOM-Face: Selecting Local Features for Recognition from Individual Face Image,2005 +265,FERET,feret,52.22165395,21.00735776,Warsaw University of Technology,edu,d31bf8f6f9404a0ab2e601e723b9a07287d0693b,citation,http://pdfs.semanticscholar.org/d31b/f8f6f9404a0ab2e601e723b9a07287d0693b.pdf,Feature Space Reduction for Face Recognition with Dual Linear Discriminant Analysis,2005 +266,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,35cdd4df9f039f475247bf03fdcc605e40683dce,citation,http://pdfs.semanticscholar.org/35cd/d4df9f039f475247bf03fdcc605e40683dce.pdf,Eye Detection and Face Recognition Using Evolutionary Computation,1998 +267,FERET,feret,38.2530945,140.8736593,Tohoku University,edu,5c707dc74c3c39674f74dc22f6b6325af456811c,citation,http://www.aoki.ecei.tohoku.ac.jp/~ito/W13_04.pdf,Restoring occluded regions using FW-PCA for face recognition,2012 +268,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,a40476d94c5cf1f929ee9514d3761dca00dd774b,citation,http://pdfs.semanticscholar.org/a404/76d94c5cf1f929ee9514d3761dca00dd774b.pdf,Watch List Face Surveillance Using Transductive Inference,2004 +269,FERET,feret,41.25713055,-72.9896696,Yale University,edu,e4691de78d35ed7085311a466b8d02198bf714ac,citation,http://pdfs.semanticscholar.org/e469/1de78d35ed7085311a466b8d02198bf714ac.pdf,The relation between race-related implicit associations and scalp-recorded neural activity evoked by faces from different races.,2009 +270,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,85639cefb8f8deab7017ce92717674d6178d43cc,citation,http://pdfs.semanticscholar.org/8563/9cefb8f8deab7017ce92717674d6178d43cc.pdf,Automatic Analysis of Spontaneous Facial Behavior: A Final Project Report,2001 +271,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,a967426ec9b761a989997d6a213d890fc34c5fe3,citation,http://vision.ucsd.edu/sites/default/files/043-wacv.pdf,Relative ranking of facial attractiveness,2013 +272,FERET,feret,37.3003127,126.972123,SungKyunKwan University,edu,055530f7f771bb1d5f352e2758d1242408d34e4d,citation,http://pdfs.semanticscholar.org/0555/30f7f771bb1d5f352e2758d1242408d34e4d.pdf,A Facial Expression Recognition System from Depth Video,2014 +273,FERET,feret,38.83133325,-77.30798839,George Mason 
University,edu,be84d76093a791bf78bed74ef1d7db54abeca878,citation,http://pdfs.semanticscholar.org/be84/d76093a791bf78bed74ef1d7db54abeca878.pdf,Open World Face Recognition with Credibility and Confidence Measures,2003 +274,FERET,feret,28.0599999,-82.41383619,University of South Florida,edu,ddb49e36570af09d96059b3b6f08f9124aafe24f,citation,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.129,A Non-Iterative Approach to Reconstruct Face Templates from Match Scores,2006 +275,FERET,feret,32.42143805,-81.78450529,Georgia Southern University,edu,98fcf33916a9bb4efdc652541573b2e7ef9e7d87,citation,http://pdfs.semanticscholar.org/98fc/f33916a9bb4efdc652541573b2e7ef9e7d87.pdf,Trustworthy Tricksters: Violating a Negative Social Expectation Affects Source Memory and Person Perception When Fear of Exploitation Is High,2016 +276,FERET,feret,37.5600406,126.9369248,Yonsei University,edu,11fa5abb5d5d09efbf9dacae6a6ceb9b2647f877,citation,https://arxiv.org/pdf/1507.02049v3.pdf,DCTNet: A simple learning-free approach for face recognition,2015 +277,FERET,feret,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab,citation,https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf,Quaero at TRECVID 2010: Semantic Indexing,2010 +278,FERET,feret,65.0592157,25.46632601,University of Oulu,edu,dbc749490275db26337c7e3201027e8cef8e371c,citation,http://pdfs.semanticscholar.org/dbc7/49490275db26337c7e3201027e8cef8e371c.pdf,Multi-band Gradient Component Pattern (MGCP): A New Statistical Feature for Face Recognition,2009 +279,FERET,feret,47.05821,15.46019568,Graz University of Technology,edu,65f6d0d91cdf1a77e3c5cb78c7d21f0f4f01f8b5,citation,http://pdfs.semanticscholar.org/65f6/d0d91cdf1a77e3c5cb78c7d21f0f4f01f8b5.pdf,"PhD Thesis Incremental, Robust, and Efficient Linear Discriminant Analysis Learning",2008 +280,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,0e9ea74cf7106057efdb63f275ca6bb838168b0c,citation,http://pdfs.semanticscholar.org/0e9e/a74cf7106057efdb63f275ca6bb838168b0c.pdf,Progressive Principal Component Analysis,2004 +281,FERET,feret,51.0784038,-114.1287077,University of Calgary,edu,d4d2014f05e17869b72f180fd0065358c722ac65,citation,http://pdfs.semanticscholar.org/d4d2/014f05e17869b72f180fd0065358c722ac65.pdf,UNIVERSITY OF CALGARY A MULTIMODAL BIOMETRIC SYSTEM BASED ON RANK LEVEL FUSION by MD. 
MARUF MONWAR A THESIS SUBMITTED TO THE FACULTY OF GRADUATE STUDIES IN PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY,2012 +282,FERET,feret,24.18005755,120.64836072,Feng Chia University,edu,fdd7c9f3838b8d868911afaafa08beffb79b5228,citation,https://pdfs.semanticscholar.org/fdd7/c9f3838b8d868911afaafa08beffb79b5228.pdf,An efficient mechanism for compensating vague pattern identification in support of a multi-criteria recommendation system,2016 +283,FERET,feret,22.9991916,120.21625134,National Cheng Kung University,edu,fdd7c9f3838b8d868911afaafa08beffb79b5228,citation,https://pdfs.semanticscholar.org/fdd7/c9f3838b8d868911afaafa08beffb79b5228.pdf,An efficient mechanism for compensating vague pattern identification in support of a multi-criteria recommendation system,2016 +284,FERET,feret,41.10427915,29.02231159,Istanbul Technical University,edu,d3d5d86afec84c0713ec868cf5ed41661fc96edc,citation,https://arxiv.org/pdf/1606.02894.pdf,A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition,2016 +285,FERET,feret,40.8927159,29.37863323,Sabanci University,edu,d3d5d86afec84c0713ec868cf5ed41661fc96edc,citation,https://arxiv.org/pdf/1606.02894.pdf,A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition,2016 +286,FERET,feret,32.7283683,-97.11201835,University of Texas at Arlington,edu,20100dbeb2dfebc7595d79755d737b21e75f39a6,citation,http://pdfs.semanticscholar.org/2010/0dbeb2dfebc7595d79755d737b21e75f39a6.pdf,Cluster Indicator Decomposition for Efficient Matrix Factorization,2011 +287,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,3ca9453d3c023bb81cce72ff2d633fc5075e1df6,citation,http://pdfs.semanticscholar.org/e36f/5fab8758194fcad043e23288330657fe7742.pdf,Generic vs. 
Person Specific Active Appearance Models,2004 +288,FERET,feret,28.59899755,-81.19712501,University of Central Florida,edu,d082f35534932dfa1b034499fc603f299645862d,citation,http://pdfs.semanticscholar.org/d082/f35534932dfa1b034499fc603f299645862d.pdf,"TAMING WILD FACES: WEB-SCALE, OPEN-UNIVERSE FACE IDENTIFICATION IN STILL AND VIDEO IMAGERY by ENRIQUE",2014 +289,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,649b47e02b82afeccc858f1f3dcec98379bfbbbd,citation,http://pdfs.semanticscholar.org/649b/47e02b82afeccc858f1f3dcec98379bfbbbd.pdf,Face Alignment Under Various Poses and Expressions,2005 +290,FERET,feret,37.43131385,-122.16936535,Stanford University,edu,7264c2a8900c2ab41575578eb2d50557b2829f84,citation,http://pdfs.semanticscholar.org/7264/c2a8900c2ab41575578eb2d50557b2829f84.pdf,Silhouetted face profiles: a new methodology for face perception research.,2007 +291,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,9887ab220254859ffc7354d5189083a87c9bca6e,citation,http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf,Generic Image Classification Approaches Excel on Face Recognition,2013 +292,FERET,feret,-34.9189226,138.60423668,University of Adelaide,edu,9887ab220254859ffc7354d5189083a87c9bca6e,citation,http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf,Generic Image Classification Approaches Excel on Face Recognition,2013 +293,FERET,feret,58.38131405,26.72078081,University of Tartu,edu,838ed2aae603dec5851ebf5e4bc64b54db7f34be,citation,http://pdfs.semanticscholar.org/838e/d2aae603dec5851ebf5e4bc64b54db7f34be.pdf,Real-Time Ensemble Based Face Recognition System for Humanoid Robots,2016 +294,FERET,feret,32.8536333,-117.2035286,Kyung Hee University,edu,6fe83b5fdeeb6d92f24af3aed6a34c5bf9ce8845,citation,http://pdfs.semanticscholar.org/6fe8/3b5fdeeb6d92f24af3aed6a34c5bf9ce8845.pdf,Face Recognition Based on Local Directional Pattern Variance (LDPv),2012 +295,FERET,feret,23.7289899,90.3982682,Institute of Information Technology,edu,6e177341d4412f9c9a639e33e6096344ef930202,citation,https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf,A Gender Recognition System from Facial Image,2018 +296,FERET,feret,23.7316957,90.3965275,University of Dhaka,edu,6e177341d4412f9c9a639e33e6096344ef930202,citation,https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf,A Gender Recognition System from Facial Image,2018 +297,FERET,feret,40.7423025,-74.17928172,New Jersey Institute of Technology,edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System,2018 +298,FERET,feret,21.2982795,-157.8186923,University of Hawaii,edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System,2018 +299,FERET,feret,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System,2018 +300,FERET,feret,38.83133325,-77.30798839,George Mason University,edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth 
System,2018 +301,FERET,feret,34.13710185,-118.12527487,California Institute of Technology,edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System,2018 +302,FERET,feret,42.2942142,-83.71003894,University of Michigan,edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System,2018 +303,FERET,feret,41.7411504,-111.8122309,Utah State University,edu,327eab70296d39511d61e91c6839446d59f5e119,citation,https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf,Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System,2018 +304,FERET,feret,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,b29f348e8675f75ff160ec65ebeeb3f3979b65d8,citation,http://pdfs.semanticscholar.org/b29f/348e8675f75ff160ec65ebeeb3f3979b65d8.pdf,An objective and subjective evaluation of content-based privacy protection of face images in video surveillance systems using JPEG XR,2013 +305,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,b29f348e8675f75ff160ec65ebeeb3f3979b65d8,citation,http://pdfs.semanticscholar.org/b29f/348e8675f75ff160ec65ebeeb3f3979b65d8.pdf,An objective and subjective evaluation of content-based privacy protection of face images in video surveillance systems using JPEG XR,2013 +306,FERET,feret,39.9922379,116.30393816,Peking University,edu,1c2724243b27a18a2302f12dea79d9a1d4460e35,citation,http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf,Fisher+Kernel criterion for discriminant analysis,2005 +307,FERET,feret,31.83907195,117.26420748,University of Science and Technology of China,edu,1c2724243b27a18a2302f12dea79d9a1d4460e35,citation,http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf,Fisher+Kernel criterion for discriminant analysis,2005 +308,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,1c2724243b27a18a2302f12dea79d9a1d4460e35,citation,http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf,Fisher+Kernel criterion for discriminant analysis,2005 +309,FERET,feret,50.74223495,-1.89433739,Bournemouth University,edu,d16f37a15f6385a6a189b06833745da5d524f69b,citation,https://pdfs.semanticscholar.org/d16f/37a15f6385a6a189b06833745da5d524f69b.pdf,Hebb repetition effects for non-verbal visual sequences: determinants of sequence acquisition.,2017 +310,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,13791aa7c1047724c4046eee94e66a506b211eb9,citation,http://pdfs.semanticscholar.org/1379/1aa7c1047724c4046eee94e66a506b211eb9.pdf,Real-time Gender Classification,2003 +311,FERET,feret,37.3003127,126.972123,SungKyunKwan University,edu,fa72e39971855dff6beb8174b5fa654e0ab7d324,citation,https://doi.org/10.1007/s11042-013-1793-1,"A depth video-based facial expression recognition system using radon transform, generalized discriminant analysis, and hidden Markov model",2013 +312,FERET,feret,24.7246403,46.62335012,King Saud University,edu,fa72e39971855dff6beb8174b5fa654e0ab7d324,citation,https://doi.org/10.1007/s11042-013-1793-1,"A depth video-based facial expression recognition system using radon transform, generalized discriminant analysis, and hidden Markov model",2013 
+313,FERET,feret,27.18794105,31.17009498,Assiut University,edu,3843b8c4143e9f1e50c61eb462376e65861bbf24,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.359,Color Image Processing Using Reduced Biquaternions with Application to Face Recognition in a PCA Framework,2017 +314,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,0cc3c62f762d64cffcab4ac7fea3896cb22a3df9,citation,http://pdfs.semanticscholar.org/d30f/cc0e4c2c78cc5ff7bbd1227d3952d366a479.pdf,Preserving Privacy by De-identifying Facial Images,2003 +315,FERET,feret,53.8925662,-122.81471592,University of Northern British Columbia,edu,2cae2ca6221fbfa9655e41ac52e54631ada7ad2c,citation,http://pdfs.semanticscholar.org/ffd6/14925a326efcb27ef52accd5638a912b4792.pdf,Electoral College and Direct Popular Vote for Multi-Candidate Election,2010 +316,FERET,feret,34.2249827,-77.86907744,University of North Carolina at Wilmington,edu,328bfd1d0229bc4973277f893abd1eb288159fc9,citation,http://pdfs.semanticscholar.org/328b/fd1d0229bc4973277f893abd1eb288159fc9.pdf,A review of the literature on the aging adult skull and face: implications for forensic science research and applications.,2007 +317,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,18b4e9e51ee14c9d816358fbe1af29f0771b7916,citation,http://pdfs.semanticscholar.org/18b4/e9e51ee14c9d816358fbe1af29f0771b7916.pdf,Intelligent environments and active camera networks,2000 +318,FERET,feret,40.8722825,-73.89489171,City University of New York,edu,0dde6981047067692793b71a2f7ad6a8708741d8,citation,http://pdfs.semanticscholar.org/0dde/6981047067692793b71a2f7ad6a8708741d8.pdf,MODELING PHYSICAL PERSONALITIES FOR VIRTUAL AGENTS BY MODELING TRAIT IMPRESSIONS OF THE FACE: A NEURAL NETWORK ANALYSIS by SHERYL BRAHNAM,2002 +319,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,20675281008211641d28ce0f2b6946537a8535c4,citation,http://pdfs.semanticscholar.org/2067/5281008211641d28ce0f2b6946537a8535c4.pdf,Multi-resolution Histograms of Local Variation Patterns (MHLVP) for Robust Face Recognition,2005 +320,FERET,feret,52.9387428,-1.20029569,University of Nottingham,edu,c22df6df55f5c6539e1a4d2e2d50dbaab34007a7,citation,http://pdfs.semanticscholar.org/c22d/f6df55f5c6539e1a4d2e2d50dbaab34007a7.pdf,Compact Binary Patterns (CBP) with Multiple Patch Classifiers for Fast and Accurate Face Recognition,2010 +321,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,2e6e335e591da1e8899ff53f9a7ddb4c63520104,citation,http://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf,Is All Face Processing Holistic? The View from UCSD,2003 +322,FERET,feret,41.6659,-91.57310307,University of Iowa,edu,2e6e335e591da1e8899ff53f9a7ddb4c63520104,citation,http://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf,Is All Face Processing Holistic? 
The View from UCSD,2003 +323,FERET,feret,42.57054745,-88.55578627,University of Geneva,edu,9c1b132243e0dcacde1717ce1cfe730a74bd8cbc,citation,http://pdfs.semanticscholar.org/9c1b/132243e0dcacde1717ce1cfe730a74bd8cbc.pdf,Hippocampus Is Place of Interaction between Unconscious and Conscious Memories,2015 +324,FERET,feret,1.2962018,103.77689944,National University of Singapore,edu,4fb9f05dc03eb4983d8f9a815745bb47970f1b93,citation,http://pdfs.semanticscholar.org/f4ee/4f7ac7585f7ea0db3b27c5ad016dbfb0feac.pdf,"On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly",2013 +325,FERET,feret,-27.49741805,153.01316956,University of Queensland,edu,4fb9f05dc03eb4983d8f9a815745bb47970f1b93,citation,http://pdfs.semanticscholar.org/f4ee/4f7ac7585f7ea0db3b27c5ad016dbfb0feac.pdf,"On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly",2013 +326,FERET,feret,-27.47715625,153.02841004,Queensland University of Technology,edu,4fb9f05dc03eb4983d8f9a815745bb47970f1b93,citation,http://pdfs.semanticscholar.org/f4ee/4f7ac7585f7ea0db3b27c5ad016dbfb0feac.pdf,"On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly",2013 +327,FERET,feret,52.9387428,-1.20029569,University of Nottingham,edu,b9df25cc4be2f703b059da93823bad6e8e8c0659,citation,http://pdfs.semanticscholar.org/b9df/25cc4be2f703b059da93823bad6e8e8c0659.pdf,Local Gabor Binary Pattern Whitened PCA: A Novel Approach for Face Recognition from Single Image Per Person,2009 +328,FERET,feret,24.4399419,118.09301781,Xiamen University,edu,57ba4b6de23a6fc9d45ff052ed2563e5de00b968,citation,https://doi.org/10.1109/ICIP.2017.8296993,An efficient deep neural networks training framework for robust face recognition,2017 +329,FERET,feret,32.7283683,-97.11201835,University of Texas at Arlington,edu,90bd16caa44086db6f0e4bbc1dde7063cb71b7b8,citation,http://www.kdd.org/kdd2016/papers/files/rfp1162-wangA.pdf,Structured Doubly Stochastic Matrix for Graph Based Clustering: Structured Doubly Stochastic Matrix,2016 +330,FERET,feret,1.2962018,103.77689944,National University of Singapore,edu,15d1582c8b65dbab5ca027467718a2c286ddce7a,citation,http://pdfs.semanticscholar.org/15d1/582c8b65dbab5ca027467718a2c286ddce7a.pdf,"On robust face recognition via sparse coding: the good, the bad and the ugly",2014 +331,FERET,feret,-27.49741805,153.01316956,University of Queensland,edu,15d1582c8b65dbab5ca027467718a2c286ddce7a,citation,http://pdfs.semanticscholar.org/15d1/582c8b65dbab5ca027467718a2c286ddce7a.pdf,"On robust face recognition via sparse coding: the good, the bad and the ugly",2014 +332,FERET,feret,-27.47715625,153.02841004,Queensland University of Technology,edu,15d1582c8b65dbab5ca027467718a2c286ddce7a,citation,http://pdfs.semanticscholar.org/15d1/582c8b65dbab5ca027467718a2c286ddce7a.pdf,"On robust face recognition via sparse coding: the good, the bad and the ugly",2014 +333,FERET,feret,34.8452999,48.5596212,Islamic Azad University,edu,e19a4dadf60848309c8fd7445d97918da654df76,citation,https://pdfs.semanticscholar.org/e19a/4dadf60848309c8fd7445d97918da654df76.pdf,JPEG Compressed Domain Face Recognition : Different Stages and Different Features,2013 +334,FERET,feret,47.5612651,7.5752961,University of Basel,edu,d1633dc3706580c8b9d98c4c0dfa9f9a29360ca3,citation,https://arxiv.org/pdf/1712.01619.pdf,Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems,2018 +335,FERET,feret,51.49887085,-0.17560797,Imperial College 
London,edu,e104e213faa97d9a9c8b8e1f15b7431c601cb250,citation,https://arxiv.org/pdf/1802.04636.pdf,Modeling of facial aging and kinship: A survey,2018 +336,FERET,feret,51.59029705,-0.22963221,Middlesex University,edu,e104e213faa97d9a9c8b8e1f15b7431c601cb250,citation,https://arxiv.org/pdf/1802.04636.pdf,Modeling of facial aging and kinship: A survey,2018 +337,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,da6696345d0d4ff6328c1c5916b0ca870d4cc6cf,citation,http://pdfs.semanticscholar.org/da66/96345d0d4ff6328c1c5916b0ca870d4cc6cf.pdf,Robust Contrast-Invariant EigenDetection,2002 +338,FERET,feret,52.2380139,6.8566761,University of Twente,edu,3b3550680136aa2fe3bd57c9faa3bfa0dfb3e748,citation,http://pdfs.semanticscholar.org/3b35/50680136aa2fe3bd57c9faa3bfa0dfb3e748.pdf,Forensic Face Recognition: a Survey,2010 +339,FERET,feret,31.30104395,121.50045497,Fudan University,edu,4ba3f9792954ee3ba894e1e330cd77da4668fa22,citation,http://pdfs.semanticscholar.org/4ba3/f9792954ee3ba894e1e330cd77da4668fa22.pdf,Nearest Neighbor Discriminant Analysis,2006 +340,FERET,feret,52.9387428,-1.20029569,University of Nottingham,edu,472ba8dd4ec72b34e85e733bccebb115811fd726,citation,http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf,Cosine Similarity Metric Learning for Face Verification,2010 +341,FERET,feret,46.109237,7.08453549,IDIAP Research Institute,edu,ba9e967208976f24a09730af94086e7ae0417067,citation,http://pdfs.semanticscholar.org/f369/03d22a463876b895bbe37b5f9ad235a38edd.pdf,An Open Source Framework for Standardized Comparisons of Face Recognition Algorithms,2012 +342,FERET,feret,40.4319722,-86.92389368,Purdue University,edu,4d527974512083712c9adf26a923b44d7e426b44,citation,http://pdfs.semanticscholar.org/4d52/7974512083712c9adf26a923b44d7e426b44.pdf,Impact of Image Quality on Performance: Comparison of Young and Elderly Fingerprints,2006 +343,FERET,feret,-38.19928505,144.30365229,Deakin University,edu,e96ce25d11296fce4e2ecc2da03bd207dc118724,citation,https://doi.org/10.1007/s00138-007-0095-x,Classification of face images using local iterated function systems,2007 +344,FERET,feret,42.0551164,-87.67581113,Northwestern University,edu,fcd2fb1ada96218dcc2547efa040e76416cc7066,citation,http://pdfs.semanticscholar.org/fcd2/fb1ada96218dcc2547efa040e76416cc7066.pdf,Perceptual data mining: bootstrapping visual intelligence from tracking behavior,2002 +345,FERET,feret,42.3583961,-71.09567788,MIT,edu,fcd2fb1ada96218dcc2547efa040e76416cc7066,citation,http://pdfs.semanticscholar.org/fcd2/fb1ada96218dcc2547efa040e76416cc7066.pdf,Perceptual data mining: bootstrapping visual intelligence from tracking behavior,2002 +346,FERET,feret,37.3351908,-121.88126008,San Jose State University,edu,97930609f1a5066fd437ed8a4e57abbfb1ae4b12,citation,http://pdfs.semanticscholar.org/bef4/03c136beaa6fd43fc3184d4666512daaf9e5.pdf,Best Practices in Testing and Reporting Performance of Biometric Devices,2002 +347,FERET,feret,47.5612651,7.5752961,University of Basel,edu,985dc9b8b003483f6df363a8ce07dd8c89ced903,citation,http://pdfs.semanticscholar.org/985d/c9b8b003483f6df363a8ce07dd8c89ced903.pdf,"3D Morphable Face Model, a Unified Approach for Analysis and Synthesis of Images",0 +348,FERET,feret,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,1057137d8ebbbfc4e816d74edd7ab04f61a893f8,citation,https://pdfs.semanticscholar.org/1057/137d8ebbbfc4e816d74edd7ab04f61a893f8.pdf,Craniofacial Aging,2008 +349,FERET,feret,37.548215,-77.45306424,Virginia Commonwealth 
University,edu,1057137d8ebbbfc4e816d74edd7ab04f61a893f8,citation,https://pdfs.semanticscholar.org/1057/137d8ebbbfc4e816d74edd7ab04f61a893f8.pdf,Craniofacial Aging,2008 +350,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,005d818ff8517669d62ba7b536e76b56698fa135,citation,http://pdfs.semanticscholar.org/4d7e/e94f164cce28a8bfef4417e9a99265b02b54.pdf,Neural Network-Based Face Detection,1996 +351,FERET,feret,39.9492344,-75.19198985,University of Pennsylvania,edu,0c85d1b384bb6e2d5d6e4db5461a7101ceed6808,citation,http://pdfs.semanticscholar.org/0ff8/d39a962ed902e1c995815ade265ea903d218.pdf,Engineering Privacy in Public: Confounding Face Recognition,2003 +352,FERET,feret,37.21872455,-80.42542519,Virginia Polytechnic Institute and State University,edu,9107543d9a9d915c92fe4139932c5d818cfc187d,citation,http://pdfs.semanticscholar.org/9107/543d9a9d915c92fe4139932c5d818cfc187d.pdf,Investigation of New Techniques for Face Detection,2007 +353,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,b3e856729f89b082b4108561479ff09394bb6553,citation,http://pdfs.semanticscholar.org/b3e8/56729f89b082b4108561479ff09394bb6553.pdf,Pose Robust Video - Based Face Recognition,2004 +354,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,d1836e137787fadb28d3418e029534765bcf1dae,citation,http://pdfs.semanticscholar.org/d183/6e137787fadb28d3418e029534765bcf1dae.pdf,"Analysis , Synthesis and Recognition of Human Faces with Pose Variations",2001 +355,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,2fd1c99edbb3d22cec4adc9ba9319cfc2360e903,citation,http://pdfs.semanticscholar.org/98c8/ca05ed5baff5b217c571ab5c5a0ee0706e27.pdf,Rotation Invariant Neural Network-Based Face Detection,1998 +356,FERET,feret,45.5039761,-73.5749687,McGill University,edu,b6145d3268032da70edc9cfececa1f9ffa4e3f11,citation,http://cnl.salk.edu/~zhafed/papers/fr_IJCV_2001.pdf,Face Recognition Using the Discrete Cosine Transform,2001 +357,FERET,feret,54.00975365,-2.78757491,Lancaster University,edu,01b73cfd803f0bdeab8bbfc26cd1ed110c762c91,citation,http://pdfs.semanticscholar.org/01b7/3cfd803f0bdeab8bbfc26cd1ed110c762c91.pdf,Facial Recognition Technology A Survey of Policy and Implementation Issues,2009 +358,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,c9579768d142a7020d095090183805c98a2f78e5,citation,http://pdfs.semanticscholar.org/e30d/b2331efa48f6c60330d492210ed6395774f2.pdf,The Bochum/USC Face Recognition System and How it Fared in the FERET Phase III Test,0 +359,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,42fe5666599f35b805657e829e8f9093ee95b908,citation,http://pdfs.semanticscholar.org/42fe/5666599f35b805657e829e8f9093ee95b908.pdf,Pose-Tolerant Face Recognition,2015 +360,FERET,feret,42.3583961,-71.09567788,MIT,edu,29c7dfbbba7a74e9aafb6a6919629b0a7f576530,citation,http://pdfs.semanticscholar.org/29c7/dfbbba7a74e9aafb6a6919629b0a7f576530.pdf,Automatic Facial Expression Analysis and Emotional Classification,2004 +361,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,f6a65be9a3790e8fd3b5116450a47a8e48a54d63,citation,http://pdfs.semanticscholar.org/f6a6/5be9a3790e8fd3b5116450a47a8e48a54d63.pdf,Parametric Piecewise Linear Subspace Method for Processing Facial Images with 3D Pose Variations,0 +362,FERET,feret,38.8964679,-104.8050594,University of Colorado at Colorado 
Springs,edu,07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1,citation,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf,Large scale unconstrained open set face database,2013 +363,FERET,feret,41.5007811,2.11143663,Universitat Autònoma de Barcelona,edu,f7f19ac1c4e38c104045c306f5ddac6329193d8c,citation,http://pdfs.semanticscholar.org/f7f1/9ac1c4e38c104045c306f5ddac6329193d8c.pdf,Measuring External Face Appearance for Face Classification,2007 +364,FERET,feret,28.0599999,-82.41383619,University of South Florida,edu,57bd46b16644be40b2e0dc595c1aaa6abbadba89,citation,http://pdfs.semanticscholar.org/c3f7/6fe32a0ca448f1ce7004198827df48bf827b.pdf,Overview of Work in Empirical Evaluation of Computer Vision Algorithms,2005 +365,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,57bd46b16644be40b2e0dc595c1aaa6abbadba89,citation,http://pdfs.semanticscholar.org/c3f7/6fe32a0ca448f1ce7004198827df48bf827b.pdf,Overview of Work in Empirical Evaluation of Computer Vision Algorithms,2005 +366,FERET,feret,40.4319722,-86.92389368,Purdue University,edu,fc83a26beb38b17af737c4ff34141d0deea3a4e1,citation,http://pdfs.semanticscholar.org/fc83/a26beb38b17af737c4ff34141d0deea3a4e1.pdf,The Challenges of the Environment and the Human / Biometric Device Interaction on Biometric System Performance,2004 +367,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,1e3739716e163fce6fded71eda078a18334aa83b,citation,https://doi.org/10.1109/CVPRW.2009.5204149,The HFB Face Database for Heterogeneous Face Biometrics research,2009 +368,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,a0d6390dd28d802152f207940c7716fe5fae8760,citation,http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf,Bayesian Face Revisited: A Joint Formulation,2012 +369,FERET,feret,31.83907195,117.26420748,University of Science and Technology of China,edu,a0d6390dd28d802152f207940c7716fe5fae8760,citation,http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf,Bayesian Face Revisited: A Joint Formulation,2012 +370,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,b19ca50a9e2415072a97482005fe0b77a8a495ce,citation,http://pdfs.semanticscholar.org/b19c/a50a9e2415072a97482005fe0b77a8a495ce.pdf,Hierarchical Direct Appearance Model for Elastic Labeled Graph Localization,2003 +371,FERET,feret,55.94951105,-3.19534913,University of Edinburgh,edu,5bf954ca82b42865c49eef4b064278b82f3b38de,citation,http://pdfs.semanticscholar.org/80b0/045eed3a1fc9ab502963f6fb3e6f70a2f638.pdf,Re-engaging with the past: recapitulation of encoding operations during episodic retrieval,2014 +372,FERET,feret,51.0784038,-114.1287077,University of Calgary,edu,5bf954ca82b42865c49eef4b064278b82f3b38de,citation,http://pdfs.semanticscholar.org/80b0/045eed3a1fc9ab502963f6fb3e6f70a2f638.pdf,Re-engaging with the past: recapitulation of encoding operations during episodic retrieval,2014 +373,FERET,feret,-33.8840504,151.1992254,University of Technology,edu,ca458f189c1167e42d3a5aaf81efc92a4c008976,citation,https://doi.org/10.1109/TIP.2012.2202678,Double Shrinking Sparse Dimension Reduction,2013 +374,FERET,feret,35.14479945,33.90492318,Eastern Mediterranean University,edu,b20a8fc556aed9ab798fcf31e4f971dbc67a9edf,citation,http://pdfs.semanticscholar.org/b20a/8fc556aed9ab798fcf31e4f971dbc67a9edf.pdf,An Adept Segmentation Algorithm and Its Application to the Extraction of Local Regions Containing Fiducial Points,2006 +375,FERET,feret,51.0784038,-114.1287077,University of 
Calgary,edu,80290f2a38741e20a38de7c00d80353604343ef8,citation,http://pdfs.semanticscholar.org/8029/0f2a38741e20a38de7c00d80353604343ef8.pdf,Eigenfeature Optimization for Face Detection,2004 +376,FERET,feret,22.304572,114.17976285,Hong Kong Polytechnic University,edu,4a24d41aef0041ef82916d2316eea86f6c45c47f,citation,http://pdfs.semanticscholar.org/4a24/d41aef0041ef82916d2316eea86f6c45c47f.pdf,Impact of Full Rank Principal Component Analysis on Classification Algorithms for Face Recognition,2012 +377,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,4a24d41aef0041ef82916d2316eea86f6c45c47f,citation,http://pdfs.semanticscholar.org/4a24/d41aef0041ef82916d2316eea86f6c45c47f.pdf,Impact of Full Rank Principal Component Analysis on Classification Algorithms for Face Recognition,2012 +378,FERET,feret,22.6481521,88.376817,"Indian Statistical Institute, Kolkata",edu,7c7fb5c70bdabe8442c46c791fb2db00c490410b,citation,http://pdfs.semanticscholar.org/7c7f/b5c70bdabe8442c46c791fb2db00c490410b.pdf,Human Face Recognition using Gabor based Kernel Entropy Component Analysis,2012 +379,FERET,feret,22.5611537,88.41310194,Jadavpur University,edu,7c7fb5c70bdabe8442c46c791fb2db00c490410b,citation,http://pdfs.semanticscholar.org/7c7f/b5c70bdabe8442c46c791fb2db00c490410b.pdf,Human Face Recognition using Gabor based Kernel Entropy Component Analysis,2012 +380,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,19e62a56b6772bbd37dfc6b8f948e260dbb474f5,citation,http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf,Cross-Domain Metric Learning Based on Information Theory,2014 +381,FERET,feret,31.83907195,117.26420748,University of Science and Technology of China,edu,19e62a56b6772bbd37dfc6b8f948e260dbb474f5,citation,http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf,Cross-Domain Metric Learning Based on Information Theory,2014 +382,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,c1cf5dda56c72b65e86f3a678f76644f22212748,citation,http://pdfs.semanticscholar.org/c1cf/5dda56c72b65e86f3a678f76644f22212748.pdf,Face Hallucination via Semi-kernel Partial Least Squares,2015 +383,FERET,feret,29.7207902,-95.34406271,University of Houston,edu,e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef,citation,http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf,Addressing the illumination challenge in two-dimensional face recognition: a survey,2015 +384,FERET,feret,42.3889785,-72.5286987,University of Massachusetts,edu,e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa,citation,https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf,Weakly Supervised Learning for Unconstrained Face Processing,2014 +385,FERET,feret,48.14955455,11.56775314,Technical University Munich,edu,bc9003ad368cb79d8a8ac2ad025718da5ea36bc4,citation,https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf,Facial expression recognition with a three-dimensional face model,2011 +386,FERET,feret,50.89273635,-1.39464295,University of Southampton,edu,d6b0a1f6dfb995436b45045b56e966d8e57b0990,citation,https://pdfs.semanticscholar.org/d6b0/a1f6dfb995436b45045b56e966d8e57b0990.pdf,Gait analysis and recognition for automated visual surveillance,2008 +387,FERET,feret,22.3874201,114.2082222,Hong Kong Baptist University,edu,02ae77f4c289426f18e83ce6e295d39538fb0fcc,citation,http://pdfs.semanticscholar.org/02ae/77f4c289426f18e83ce6e295d39538fb0fcc.pdf,Dependency Modeling for Information Fusion with Applications in Visual Recognition,2013 
+388,FERET,feret,23.883312,90.2693921,Jahangirnagar University,edu,078549cb5474b024d203f96954646cacef219682,citation,http://pdfs.semanticscholar.org/1b42/0d5cf66e60b540ecdb352a287c85d9d7e2a4.pdf,"Single Image Face Recognition based on Gabor, Sobel and Local Ternary Pattern",2015 +389,FERET,feret,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,3dfb822e16328e0f98a47209d7ecd242e4211f82,citation,https://arxiv.org/pdf/1708.08197.pdf,Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments,2017 +390,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,1320c42b348c5342c2ad6a60e3ded3ff0bd56f7f,citation,https://doi.org/10.1007/s11263-007-0119-z,"A Viewpoint Invariant, Sparsely Registered, Patch Based, Face Verifier",2007 +391,FERET,feret,37.50882,126.9619,Chung-Ang University,edu,17cf6195fd2dfa42670dc7ada476e67b381b8f69,citation,http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf,Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments,2003 +392,FERET,feret,37.403917,127.159786,Korea Electronics Technology Institute,edu,17cf6195fd2dfa42670dc7ada476e67b381b8f69,citation,http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf,Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments,2003 +393,FERET,feret,35.9542493,-83.9307395,University of Tennessee,edu,17cf6195fd2dfa42670dc7ada476e67b381b8f69,citation,http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf,Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments,2003 +394,FERET,feret,39.94976005,116.33629046,Beijing Jiaotong University,edu,b5930275813a7e7a1510035a58dd7ba7612943bc,citation,http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf,Face Recognition Using L-Fisherfaces,2010 +395,FERET,feret,25.0410728,121.6147562,Institute of Information Science,edu,b5930275813a7e7a1510035a58dd7ba7612943bc,citation,http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf,Face Recognition Using L-Fisherfaces,2010 +396,FERET,feret,36.00146435,120.11624057,Shandong University of Science and Technology,edu,b5930275813a7e7a1510035a58dd7ba7612943bc,citation,http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf,Face Recognition Using L-Fisherfaces,2010 +397,FERET,feret,44.9689836,-93.20941629,Fraser University,edu,281cc188bf7588681cdf8e325b0ed13ac927e2e6,citation,https://pdfs.semanticscholar.org/281c/c188bf7588681cdf8e325b0ed13ac927e2e6.pdf,A Multi-Modal Person Recognition System for Social Robots,2018 +398,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,89ac06ccbc410224f4d05d5ae8fa46c4fe3cbe0f,citation,http://pdfs.semanticscholar.org/947e/53c1d9035df85a3bc1b852928acbe889daf4.pdf,Video Based Face Verification,2001 +399,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,72b4b8f4a9f25cac5686231b44a2220945fd2ff6,citation,http://pdfs.semanticscholar.org/72b4/b8f4a9f25cac5686231b44a2220945fd2ff6.pdf,Face Verification Using Modeled Eigenspectrum,2008 +400,FERET,feret,25.01682835,121.53846924,National Taiwan University,edu,95289007f2f336e6636cf8f920225b8d47c6e94f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796,Automatic Training Image Acquisition and Effective Feature Selection From Community-Contributed Photos for Facial Attribute Detection,2013 
+401,FERET,feret,25.0411727,121.6146518,"Academia Sinica, Taiwan",edu,95289007f2f336e6636cf8f920225b8d47c6e94f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796,Automatic Training Image Acquisition and Effective Feature Selection From Community-Contributed Photos for Facial Attribute Detection,2013 +402,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,749ebfa344b6d27de898d619cea0b28ad3894ff2,citation,http://pdfs.semanticscholar.org/749e/bfa344b6d27de898d619cea0b28ad3894ff2.pdf,Predicting Biometric Authentication System Performance Across Different Application Conditions: A Bootstrap Enhanced Parametric Approach,2007 +403,FERET,feret,52.9387428,-1.20029569,University of Nottingham,edu,e3bb87e858bc752436c7a8da3fca68b2dacbf3e8,citation,https://pdfs.semanticscholar.org/e3bb/87e858bc752436c7a8da3fca68b2dacbf3e8.pdf,On the Evaluation of Methods for the Recovery of Plant Root Systems from X-ray Computed Tomography Images,2015 +404,FERET,feret,45.42580475,-75.68740118,University of Ottawa,edu,a94cae786d515d3450d48267e12ca954aab791c4,citation,http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf,YawDD: a yawning detection dataset,2014 +405,FERET,feret,34.7361066,10.7427275,"University of Sfax, Tunisia",edu,8a3bb63925ac2cdf7f9ecf43f71d65e210416e17,citation,https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf,ShearFace: Efficient Extraction of Anisotropic Features for Face Recognition,2014 +406,FERET,feret,16.46007565,102.81211798,Khon Kaen University,edu,31dd6bafd6e7c6095eb8d0591abac3b0106a75e3,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336,Face Recognition In Unconstrained Environment,2018 +407,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,beea33ccd9423d48d6cfb928469bbe7841e63e73,citation,http://pdfs.semanticscholar.org/beea/33ccd9423d48d6cfb928469bbe7841e63e73.pdf,DIARETDB1 diabetic retinopathy database and evaluation protocol,2007 +408,FERET,feret,22.3386304,114.2620337,Hong Kong University of Science and Technology,edu,4cf0c6d3da8e20d6f184a4eaa6865d61680982b8,citation,http://pdfs.semanticscholar.org/4cf0/c6d3da8e20d6f184a4eaa6865d61680982b8.pdf,Face recognition based on 3D mesh model,2004 +409,FERET,feret,-33.95828745,18.45997349,University of Cape Town,edu,ba6082291b018b14f8da4f96afc631918bad3a1b,citation,https://pdfs.semanticscholar.org/3f5b/0cf2ed392045026ea0d1d67145d0400e516f.pdf,"Calibration , Recognition , and Shape from Silhouettes of Stones",2007 +410,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,b1e218046a28d10ec0be3272809608dea378eddc,citation,https://pdfs.semanticscholar.org/12c5/66e2eee7bbaf45b894e7282f87f00f1db20a.pdf,Overview of the Multiple Biometrics Grand Challenge,2009 +411,FERET,feret,39.9922379,116.30393816,Peking University,edu,15122ef718265beb4cb1a74e5d1f41c5edcb4ba5,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2005.165,On the Euclidean distance of images,2005 +412,FERET,feret,47.00646895,-120.5367304,Central Washington University,edu,9d6e60d49e92361f8f558013065dfa67043dd337,citation,https://pdfs.semanticscholar.org/9d6e/60d49e92361f8f558013065dfa67043dd337.pdf,Applications of Computational Geometry and Computer Vision,2016 +413,FERET,feret,22.3874201,114.2082222,Hong Kong Baptist University,edu,121839d3254820b7017b07ef47acc89b975286a9,citation,https://pdfs.semanticscholar.org/92a2/5b281f1637d125cefefcbfc382f48f456f4c.pdf,Feature Extraction for Incomplete Data via Low-rank Tucker Decomposition,2017 
+414,FERET,feret,23.1353836,113.29470496,Guangdong University of Technology,edu,121839d3254820b7017b07ef47acc89b975286a9,citation,https://pdfs.semanticscholar.org/92a2/5b281f1637d125cefefcbfc382f48f456f4c.pdf,Feature Extraction for Incomplete Data via Low-rank Tucker Decomposition,2017 +415,FERET,feret,35.704514,51.40972058,Amirkabir University of Technology,edu,88ed558bff3600f5354963d1abe762309f66111e,citation,https://doi.org/10.1109/TIFS.2015.2393553,Real-World and Rapid Face Recognition Toward Pose and Expression Variations via Feature Library Matrix,2015 +416,FERET,feret,35.6037444,53.43445877,Semnan University,edu,88ed558bff3600f5354963d1abe762309f66111e,citation,https://doi.org/10.1109/TIFS.2015.2393553,Real-World and Rapid Face Recognition Toward Pose and Expression Variations via Feature Library Matrix,2015 +417,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,016a8ed8f6ba49bc669dbd44de4ff31a79963078,citation,https://doi.org/10.1109/ICASSP.2004.1327215,Face relighting for face recognition under generic illumination,2004 +418,FERET,feret,23.09461185,113.28788994,Sun Yat-Sen University,edu,44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,citation,http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf,Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models,2016 +419,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,2fd007088a75916d0bf50c493d94f950bf55c5e6,citation,https://doi.org/10.1007/978-981-10-7302-1_1,Projective Representation Learning for Discriminative Face Recognition,2017 +420,FERET,feret,43.66333345,-79.39769975,University of Toronto,edu,0cb613bf519b90d08d2f12623b41f02c638cea63,citation,http://koasas.kaist.ac.kr/bitstream/10203/22675/1/Face%20Annotation%20for%20Personal%20Photos%20Using%20Context%20Assisted%20Face%20Recognition.pdf,Face annotation for personal photos using context-assisted face recognition,2008 +421,FERET,feret,24.7925484,120.9951183,National Tsing Hua University,edu,30b6811205b42e92d7a82c606d4521319764250b,citation,https://doi.org/10.1109/APSIPA.2013.6694367,Low cost illumination invariant face recognition by down-up sampling self quotient image,2013 +422,FERET,feret,50.0764296,14.41802312,Czech Technical University,edu,ff69da3510f5ffed224069faf62036e1aa9b6d26,citation,https://pdfs.semanticscholar.org/a256/3501ffd5a840fa4df0f3911a82e117df2f7f.pdf,Extended Set of Local Binary Patterns for Rapid Object Detection,2010 +423,FERET,feret,32.0575279,118.78682252,Southeast University,edu,c207fd762728f3da4cddcfcf8bf19669809ab284,citation,http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf,Face Alignment Using Boosting and Evolutionary Search,2009 +424,FERET,feret,52.2380139,6.8566761,University of Twente,edu,c207fd762728f3da4cddcfcf8bf19669809ab284,citation,http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf,Face Alignment Using Boosting and Evolutionary Search,2009 +425,FERET,feret,51.49887085,-0.17560797,Imperial College London,edu,23a450a075d752f1ec2b1e5e225de13d3bc37636,citation,http://pdfs.semanticscholar.org/23a4/50a075d752f1ec2b1e5e225de13d3bc37636.pdf,Subspace Learning in Krein Spaces: Complete Kernel Fisher Discriminant Analysis with Indefinite Kernels,2012 +426,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,521c2e9892eb22f65ba5b0d4c8d2f4c096d9fdf3,citation,http://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2006_2/gross_ralph_2006_2.pdf,Model-Based Face De-Identification,2006 
+427,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,521c2e9892eb22f65ba5b0d4c8d2f4c096d9fdf3,citation,http://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2006_2/gross_ralph_2006_2.pdf,Model-Based Face De-Identification,2006 +428,FERET,feret,25.01682835,121.53846924,National Taiwan University,edu,91e507d2d8375bf474f6ffa87788aa3e742333ce,citation,http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf,Robust Face Recognition Using Probabilistic Facial Trait Code,2010 +429,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,744b794f0047b008c517752fc9bb1100e5f120cc,citation,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333736,Multiple-exemplar discriminant analysis for face recognition,2004 +430,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,44d93039eec244083ac7c46577b9446b3a071f3e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571,Empirical comparisons of several preprocessing methods for illumination insensitive face recognition,2005 +431,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,2d435b7510eeda648dc34d5b8ac921499d525218,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383395,Improving Variance Estimation in Biometric Systems,2007 +432,FERET,feret,38.8964679,-104.8050594,University of Colorado at Colorado Springs,edu,2d435b7510eeda648dc34d5b8ac921499d525218,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383395,Improving Variance Estimation in Biometric Systems,2007 +433,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,ab7bcbaa9e77d35634302b021d47e7889628a88d,citation,http://pdfs.semanticscholar.org/ab7b/cbaa9e77d35634302b021d47e7889628a88d.pdf,FACESKETCHID: A SYSTEM FOR FACIAL SKETCH TO MUGSHOT MATCHING by Scott,2014 +434,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,92017bf2df5f6532d39c624ea209f37bb6728097,citation,http://pdfs.semanticscholar.org/9201/7bf2df5f6532d39c624ea209f37bb6728097.pdf,"Attention Driven Face Recognition, Learning from Human Vision System",2011 +435,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,92017bf2df5f6532d39c624ea209f37bb6728097,citation,http://pdfs.semanticscholar.org/9201/7bf2df5f6532d39c624ea209f37bb6728097.pdf,"Attention Driven Face Recognition, Learning from Human Vision System",2011 +436,FERET,feret,39.9922379,116.30393816,Peking University,edu,92017bf2df5f6532d39c624ea209f37bb6728097,citation,http://pdfs.semanticscholar.org/9201/7bf2df5f6532d39c624ea209f37bb6728097.pdf,"Attention Driven Face Recognition, Learning from Human Vision System",2011 +437,FERET,feret,35.704514,51.40972058,Amirkabir University of Technology,edu,841bf196ee0086c805bd5d1d0bddfadc87e424ec,citation,http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf,Locally Kernel-based Nonlinear Regression for Face Recognition,2012 +438,FERET,feret,34.8452999,48.5596212,Islamic Azad University,edu,841bf196ee0086c805bd5d1d0bddfadc87e424ec,citation,http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf,Locally Kernel-based Nonlinear Regression for Face Recognition,2012 +439,FERET,feret,55.87231535,-4.28921784,University of Glasgow,edu,40055c342c19ab492df04dae2e186cd0d6b5dc5e,citation,http://pdfs.semanticscholar.org/a406/ad4bdf50f696191e7472b7a41d9d57ff046c.pdf,Robust representations for face recognition: the power of averages.,2005 +440,FERET,feret,1.2988926,103.7873107,"A*STAR, 
Singapore",edu,c444c4dab97dd6d6696f56c1cacda051dde60448,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37,Multiview Face Detection and Registration Requiring Minimal Manual Intervention,2013 +441,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,c444c4dab97dd6d6696f56c1cacda051dde60448,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37,Multiview Face Detection and Registration Requiring Minimal Manual Intervention,2013 +442,FERET,feret,32.8785349,-117.2358307,"Tijuana Institute of Technology, Mexico",edu,235bebe7d0db37e6727dfa1246663be34027d96b,citation,https://doi.org/10.1109/NAFIPS.2016.7851625,General Type-2 fuzzy edge detectors applied to face recognition systems,2016 +443,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,65b1760d9b1541241c6c0222cc4ee9df078b593a,citation,http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf,Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions,2009 +444,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,27b9e75bcaf9e12127f7181bcb7f1fcb105462c4,citation,http://www.cbsr.ia.ac.cn/users/zlei/papers/LEI-LFD-FG-11.pdf,Local frequency descriptor for low-resolution face recognition,2011 +445,FERET,feret,65.0592157,25.46632601,University of Oulu,edu,27b9e75bcaf9e12127f7181bcb7f1fcb105462c4,citation,http://www.cbsr.ia.ac.cn/users/zlei/papers/LEI-LFD-FG-11.pdf,Local frequency descriptor for low-resolution face recognition,2011 +446,FERET,feret,44.97308605,-93.23708813,University of Minnesota,edu,aecd24f4a41eb6942375b9c03adcb7e137250b3f,citation,http://pdfs.semanticscholar.org/aecd/24f4a41eb6942375b9c03adcb7e137250b3f.pdf,Tensor Sparse Coding for Region Covariances,2010 +447,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,179f446aa297d6fe5c864b605286b946f85bb4ee,citation,http://lear.inrialpes.fr/people/triggs/events/iccv03/cdrom/iccv03/1449_wang.pdf,Fusion of static and dynamic body biometrics for gait recognition,2003 +448,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,3d741315108b95cdb56d312648f5ad1c002c9718,citation,http://pdfs.semanticscholar.org/3d74/1315108b95cdb56d312648f5ad1c002c9718.pdf,Image-based face recognition under illumination and pose variations.,2005 +449,FERET,feret,31.30104395,121.50045497,Fudan University,edu,8ca3cfb9595ebc5b36a25659f6bbf362f0b14ae3,citation,http://pdfs.semanticscholar.org/8ca3/cfb9595ebc5b36a25659f6bbf362f0b14ae3.pdf,Spectral Clustering Based Null Space Linear Discriminant Analysis (SNLDA),2007 +450,FERET,feret,39.94976005,116.33629046,Beijing Jiaotong University,edu,8ca3cfb9595ebc5b36a25659f6bbf362f0b14ae3,citation,http://pdfs.semanticscholar.org/8ca3/cfb9595ebc5b36a25659f6bbf362f0b14ae3.pdf,Spectral Clustering Based Null Space Linear Discriminant Analysis (SNLDA),2007 +451,FERET,feret,34.8452999,48.5596212,Islamic Azad University,edu,908a899c716d63bd327dee4a72061db5674bdc92,citation,http://pdfs.semanticscholar.org/908a/899c716d63bd327dee4a72061db5674bdc92.pdf,Experiments with Face Recognition Using a Novel Approach Based on CVQ Technique,2012 +452,FERET,feret,22.304572,114.17976285,Hong Kong Polytechnic University,edu,9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73,citation,http://pdfs.semanticscholar.org/9f53/83ec6ee5e810679e4a7e0a3f153f0ed3bb73.pdf,3D Shape and Pose Estimation of Face Images Using the Nonlinear Least-Squares Model,2010 +453,FERET,feret,40.0044795,116.370238,Chinese Academy of 
Sciences,edu,9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73,citation,http://pdfs.semanticscholar.org/9f53/83ec6ee5e810679e4a7e0a3f153f0ed3bb73.pdf,3D Shape and Pose Estimation of Face Images Using the Nonlinear Least-Squares Model,2010 +454,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,07c90e85ac0f74b977babe245dea0f0abcf177e3,citation,http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf,An Image Preprocessing Algorithm for Illumination Invariant Face Recognition,2003 +455,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,0faab61c742609be74463d30b0eb1118dba4a4f3,citation,http://pdfs.semanticscholar.org/0faa/b61c742609be74463d30b0eb1118dba4a4f3.pdf,Null Space Approach of Fisher Discriminant Analysis for Face Recognition,2004 +456,FERET,feret,24.7246403,46.62335012,King Saud University,edu,1319dbeaa28f8a9b19e03a7631e96393e08a07fa,citation,http://pdfs.semanticscholar.org/1319/dbeaa28f8a9b19e03a7631e96393e08a07fa.pdf,Gender Recognition Using Fusion of Local and Global Facial Features,2013 +457,FERET,feret,25.7173339,-80.27866887,University of Miami,edu,48381007b85e8a3b74e5401b2dfc1a5dfc897622,citation,http://pdfs.semanticscholar.org/4838/1007b85e8a3b74e5401b2dfc1a5dfc897622.pdf,Sparse Representation and Dictionary Learning for Biometrics and Object Tracking,2015 +458,FERET,feret,33.776033,-84.39884086,Georgia Institute of Technology,edu,852e7df8794b15413f1d71628939c3cc28580b12,citation,http://pdfs.semanticscholar.org/852e/7df8794b15413f1d71628939c3cc28580b12.pdf,Boosted Audio-Visual HMM for Speech Reading,2003 +459,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,c5c1575565e04cd0afc57d7ac7f7a154c573b38f,citation,https://pdfs.semanticscholar.org/010a/f49ddb10c51b7913c2533910dd28ca39411c.pdf,Face Refinement through a Gradient Descent Alignment Approach,2006 +460,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,da7bbfa905d88834f8929cb69f41a1b683639f4b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752,Discriminant analysis with Gabor phase for robust face recognition,2012 +461,FERET,feret,32.05765485,118.7550004,HoHai University,edu,da7bbfa905d88834f8929cb69f41a1b683639f4b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752,Discriminant analysis with Gabor phase for robust face recognition,2012 +462,FERET,feret,34.1235825,108.83546,Xidian University,edu,da7bbfa905d88834f8929cb69f41a1b683639f4b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752,Discriminant analysis with Gabor phase for robust face recognition,2012 +463,FERET,feret,40.5709358,-105.08655256,Colorado State University,edu,546cbbb897022096511f6a71259e3b99c558224d,citation,http://pdfs.semanticscholar.org/8a17/e16de6b932ec42e269621e29d99e46591fef.pdf,PCA vs. 
ICA: A Comparison on the FERET Data Set,2002 +464,FERET,feret,24.7925484,120.9951183,National Tsing Hua University,edu,6e7afe55d363adf80330116968163c7e9500f53b,citation,http://www.cs.nthu.edu.tw/~cchen/Research/2007EitFace.pdf,SVD-based projection for face recognition,2007 +465,FERET,feret,22.53521465,113.9315911,Shenzhen University,edu,2a9946fb626a58d376fb1491ca8bf8fb4f68dcf9,citation,http://pdfs.semanticscholar.org/2a99/46fb626a58d376fb1491ca8bf8fb4f68dcf9.pdf,Enlarge the Training Set Based on Inter-Class Relationship for Face Recognition from One Image per Person,2013 +466,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,38c61c11554135e09a2353afa536d010c7a53cbb,citation,http://pdfs.semanticscholar.org/38c6/1c11554135e09a2353afa536d010c7a53cbb.pdf,Learning the Detection of Faces in Natural Images,2002 +467,FERET,feret,55.70229715,37.53179777,Lomonosov Moscow State University,edu,6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,citation,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf,Multiview discriminative learning for age-invariant face recognition,2013 +468,FERET,feret,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,citation,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf,Multiview discriminative learning for age-invariant face recognition,2013 +469,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,citation,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf,Multiview discriminative learning for age-invariant face recognition,2013 +470,FERET,feret,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,citation,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf,Multiview discriminative learning for age-invariant face recognition,2013 +471,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,d33b26794ea6d744bba7110d2d4365b752d7246f,citation,http://pdfs.semanticscholar.org/d33b/26794ea6d744bba7110d2d4365b752d7246f.pdf,Transfer Feature Representation via Multiple Kernel Learning,2015 +472,FERET,feret,51.2975344,1.07296165,University of Kent,edu,55f94957f753e74f6f0170a45dee746c5b013edb,citation,http://pdfs.semanticscholar.org/55f9/4957f753e74f6f0170a45dee746c5b013edb.pdf,Face Recognition Using Balanced Pairwise Classifier Training,2009 +473,FERET,feret,60.18558755,24.8242733,Aalto University,edu,6dbe76f51091ca6a626a62846a946ce687c3dbe8,citation,http://pdfs.semanticscholar.org/6dbe/76f51091ca6a626a62846a946ce687c3dbe8.pdf,INCREMENTAL OBJECT MATCHING WITH PROBABILISTIC METHODS Doctoral dissertation,0 +474,FERET,feret,43.614386,7.071125,EURECOM,edu,314ad104401c78a83cfe8018412b6a2f33340fc6,citation,http://www.eurecom.fr/fr/publication/4966/download/sec-publi-4966.pdf,"Privacy protecting, intelligibility preserving video surveillance",2016 +475,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,08f6ad0a3e75b715852f825d12b6f28883f5ca05,citation,http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf,Face recognition: Some challenges in forensics,2011 +476,FERET,feret,-33.8809651,151.20107299,University of Technology Sydney,edu,3b64efa817fd609d525c7244a0e00f98feacc8b4,citation,http://doi.acm.org/10.1145/2845089,A Comprehensive Survey on Pose-Invariant Face 
Recognition,2016 +477,FERET,feret,-35.2776999,149.118527,Australian National University,edu,102cfd088799405d47c824735dc1356e5835dce7,citation,http://pdfs.semanticscholar.org/d5d0/d25663ec0ff8099e613d2278f8a673b9729f.pdf,Learning-based Face Synthesis for Pose-Robust Recognition from Single Image,2009 +478,FERET,feret,-35.23656905,149.08446994,University of Canberra,edu,102cfd088799405d47c824735dc1356e5835dce7,citation,http://pdfs.semanticscholar.org/d5d0/d25663ec0ff8099e613d2278f8a673b9729f.pdf,Learning-based Face Synthesis for Pose-Robust Recognition from Single Image,2009 +479,FERET,feret,25.00823205,121.53577153,National Taiwan Normal University,edu,fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084,An experimental study on content-based face annotation of photos,2009 +480,FERET,feret,28.59899755,-81.19712501,University of Central Florida,edu,2910fcd11fafee3f9339387929221f4fc1160973,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf,Evaluating Open-Universe Face Identification on the Web,2013 +481,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,2910fcd11fafee3f9339387929221f4fc1160973,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf,Evaluating Open-Universe Face Identification on the Web,2013 +482,FERET,feret,22.304572,114.17976285,Hong Kong Polytechnic University,edu,e0ea8ef91bd0a35aec31c9a493137163b4f042b6,citation,http://pdfs.semanticscholar.org/e0ea/8ef91bd0a35aec31c9a493137163b4f042b6.pdf,Sparse representation with nearest subspaces for face recognition,2012 +483,FERET,feret,22.53521465,113.9315911,Shenzhen University,edu,e0ea8ef91bd0a35aec31c9a493137163b4f042b6,citation,http://pdfs.semanticscholar.org/e0ea/8ef91bd0a35aec31c9a493137163b4f042b6.pdf,Sparse representation with nearest subspaces for face recognition,2012 +484,FERET,feret,34.80809035,135.45785218,Osaka University,edu,29639a071f67a6867000b53bcb97b37b3d090319,citation,http://pdfs.semanticscholar.org/2963/9a071f67a6867000b53bcb97b37b3d090319.pdf,Gait Identification Considering Body Tilt by Walking Direction Changes,2008 +485,FERET,feret,-22.8148374,-47.0647708,University of Campinas (UNICAMP),edu,b161d261fabb507803a9e5834571d56a3b87d147,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913,Gender recognition from face images using a geometric descriptor,2017 +486,FERET,feret,28.54632595,77.27325504,Indian Institute of Technology Delhi,edu,5539c0bee8fcf825e63a1abaa950615ebd9c6b49,citation,http://pdfs.semanticscholar.org/5539/c0bee8fcf825e63a1abaa950615ebd9c6b49.pdf,Car Detection and Recognition Based on Rear View and Back Light Features,2014 +487,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,691463f3f7acb0502e21b40958c1ecdee16d1fe0,citation,http://pdfs.semanticscholar.org/eb46/25ad9143196021c3def560d025d346c46909.pdf,Adaptive Markov Random Fields for Example-Based Super-resolution of Faces,2006 +488,FERET,feret,51.5231607,-0.1282037,University College London,edu,0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112,citation,http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf,Patch-based models for visual object classes,2011 +489,FERET,feret,45.7413921,126.62552755,Harbin Institute of 
Technology,edu,82d5e927c4f1429c07552bfc7bebd5f0e3f2f444,citation,http://pdfs.semanticscholar.org/82d5/e927c4f1429c07552bfc7bebd5f0e3f2f444.pdf,Histogram Sequence of Local Gabor Binary Pattern for Face Description and Identification,2006 +490,FERET,feret,41.10427915,29.02231159,Istanbul Technical University,edu,e9a8a88b47d0bc20579f39eba1c380b07edc244f,citation,https://pdfs.semanticscholar.org/e9a8/a88b47d0bc20579f39eba1c380b07edc244f.pdf,Effects of the Facial and Racial Features on Gender Classification,2010 +491,FERET,feret,35.3341487,139.4943356,"Azbil Corporation, Kawana, Japan",company,982fcead58be419e4f34df6e806204674a4bc579,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012,Performance improvement of face recognition algorithms using occluded-region detection,2013 +492,FERET,feret,38.2530945,140.8736593,Tohoku University,edu,982fcead58be419e4f34df6e806204674a4bc579,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012,Performance improvement of face recognition algorithms using occluded-region detection,2013 +493,FERET,feret,34.1235825,108.83546,Xidian University,edu,61f4429c085e8a93c4d7bdb9bff6fac38e58e5c6,citation,http://pdfs.semanticscholar.org/61f4/429c085e8a93c4d7bdb9bff6fac38e58e5c6.pdf,Discriminant Neighborhood Structure Embedding Using Trace Ratio Criterion for Image Recognition,2015 +494,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,ec645bbc34d3ed264516df8b1add4d0cd6c35631,citation,http://pdfs.semanticscholar.org/ec64/5bbc34d3ed264516df8b1add4d0cd6c35631.pdf,An improved Bayesian face recognition algorithm in PCA subspace,2003 +495,FERET,feret,23.09461185,113.28788994,Sun Yat-Sen University,edu,3356074f4896bf2af7f46749fdc212a99d4932a6,citation,http://pdfs.semanticscholar.org/3356/074f4896bf2af7f46749fdc212a99d4932a6.pdf,Learning Low-Rank Class-Specific Dictionary and Sparse Intra-Class Variant Dictionary for Face Recognition,2015 +496,FERET,feret,30.2931534,120.1620458,Zhejiang University of Technology,edu,3356074f4896bf2af7f46749fdc212a99d4932a6,citation,http://pdfs.semanticscholar.org/3356/074f4896bf2af7f46749fdc212a99d4932a6.pdf,Learning Low-Rank Class-Specific Dictionary and Sparse Intra-Class Variant Dictionary for Face Recognition,2015 +497,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a,citation,http://pdfs.semanticscholar.org/7f1f/3d7b1a4e7fc895b77cb23b1119a6f13e4d3a.pdf,Multi-subregion based probabilistic approach toward pose-invariant face recognition,2003 +498,FERET,feret,35.907757,127.766922,Mando Corp.,company,1ab19e516b318ed6ab64822efe9b2328836107a4,citation,https://doi.org/10.1109/TIP.2010.2083674,Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation,2011 +499,FERET,feret,37.566535,126.9779692,Samsung,company,1ab19e516b318ed6ab64822efe9b2328836107a4,citation,https://doi.org/10.1109/TIP.2010.2083674,Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation,2011 +500,FERET,feret,39.9041999,116.4073963,"Samsung SAIT, Beijing",company,1ab19e516b318ed6ab64822efe9b2328836107a4,citation,https://doi.org/10.1109/TIP.2010.2083674,Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation,2011 +501,FERET,feret,42.718568,-84.47791571,Michigan State 
University,edu,e506cdb250eba5e70c5147eb477fbd069714765b,citation,https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf,Heterogeneous Face Recognition,2012 +502,FERET,feret,34.80809035,135.45785218,Osaka University,edu,97dbcc592ed048db545c6e9ed1f27372e8d1d4b8,citation,http://pdfs.semanticscholar.org/97db/cc592ed048db545c6e9ed1f27372e8d1d4b8.pdf,Omnidirectional Gait Identification by Tilt Normalization and Azimuth View Transformation,2008 +503,FERET,feret,42.8298248,-73.87719385,GE Global Research Center,edu,50b40ec042047b4292fd9b650969d4efbd20c9ed,citation,http://cse.msu.edu/~liuxm/publication/Liu_GradientPursuit_FG2011.pdf,Optimal gradient pursuit for face alignment,2011 +504,FERET,feret,23.09461185,113.28788994,Sun Yat-Sen University,edu,7b47eb8faaf9c2275cdc70299b850ed649ceec62,citation,http://pdfs.semanticscholar.org/7b47/eb8faaf9c2275cdc70299b850ed649ceec62.pdf,1D-LDA vs. 2D-LDA: When is vector-based linear discriminant analysis better than matrix-based?,2008 +505,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,7b47eb8faaf9c2275cdc70299b850ed649ceec62,citation,http://pdfs.semanticscholar.org/7b47/eb8faaf9c2275cdc70299b850ed649ceec62.pdf,1D-LDA vs. 2D-LDA: When is vector-based linear discriminant analysis better than matrix-based?,2008 +506,FERET,feret,35.0116363,135.7680294,"OMRON Corporation, Kyoto, Japan",company,38e7f3fe450b126367ec358be9b4cc04e82fa8c7,citation,https://doi.org/10.1109/TIP.2014.2351265,Maximal Likelihood Correspondence Estimation for Face Recognition Across Pose,2014 +507,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,38e7f3fe450b126367ec358be9b4cc04e82fa8c7,citation,https://doi.org/10.1109/TIP.2014.2351265,Maximal Likelihood Correspondence Estimation for Face Recognition Across Pose,2014 +508,FERET,feret,33.776033,-84.39884086,Georgia Institute of Technology,edu,ffa23a8c988e57cf5fc21b56b522a4ee68f2f362,citation,https://pdfs.semanticscholar.org/ffa2/3a8c988e57cf5fc21b56b522a4ee68f2f362.pdf,Social game retrieval from unstructured videos,2010 +509,FERET,feret,40.47913175,-74.43168868,Rutgers University,edu,307c5c0a61e318a65bd65af694ce89c275fd7299,citation,http://pdfs.semanticscholar.org/307c/5c0a61e318a65bd65af694ce89c275fd7299.pdf,Face Mis-alignment Analysis by Multiple-Instance Subspace,2007 +510,FERET,feret,43.614386,7.071125,EURECOM,edu,43b6fb3146cb92bc36a2aab1368d8665af106a87,citation,https://doi.org/10.23919/EUSIPCO.2017.8081347,"ASePPI, an adaptive scrambling enabling privacy protection and intelligibility in H.264/AVC",2017 +511,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,4b605e6a9362485bfe69950432fa1f896e7d19bf,citation,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf,A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets,2016 +512,FERET,feret,43.47061295,-80.54724732,University of Waterloo,edu,2cdf5952b5a1bea5d24917aa2f3fc2ee33568e9a,citation,https://arxiv.org/pdf/1507.01251v1.pdf,Autoencoding the retrieval relevance of medical images,2015 +513,FERET,feret,29.6328784,-82.3490133,University of Florida,edu,2a392cbdb2ac977ad9f969659111e20bd0e9611f,citation,http://pdfs.semanticscholar.org/2a39/2cbdb2ac977ad9f969659111e20bd0e9611f.pdf,Supplementary Material for Privacy Preserving Optics for Miniature Vision Sensors,2015 +514,FERET,feret,22.1240187,113.54510901,University of Macau,edu,7d61b70d922d20c52a4e629b09465076af71ddfd,citation,https://doi.org/10.1007/s10044-011-0258-2,Nonnegative class-specific 
entropy component analysis with adaptive step search criterion,2011 +515,FERET,feret,28.0599999,-82.41383619,University of South Florida,edu,bde276015ba6677f0ec5fbfc97d5c57daca9d391,citation,http://pdfs.semanticscholar.org/bde2/76015ba6677f0ec5fbfc97d5c57daca9d391.pdf,An Evaluation of Face and Ear Biometrics,2002 +516,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,bde276015ba6677f0ec5fbfc97d5c57daca9d391,citation,http://pdfs.semanticscholar.org/bde2/76015ba6677f0ec5fbfc97d5c57daca9d391.pdf,An Evaluation of Face and Ear Biometrics,2002 +517,FERET,feret,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014,Image-to-Set Face Recognition Using Locality Repulsion Projections and Sparse Reconstruction-Based Similarity Measure,2013 +518,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014,Image-to-Set Face Recognition Using Locality Repulsion Projections and Sparse Reconstruction-Based Similarity Measure,2013 +519,FERET,feret,40.8419836,-73.94368971,Columbia University,edu,759a3b3821d9f0e08e0b0a62c8b693230afc3f8d,citation,http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf,Attribute and simile classifiers for face verification,2009 +520,FERET,feret,26.513188,80.23651945,Indian Institute of Technology Kanpur,edu,871e6c1de2e0ba86bad8975b8411ad76a6a9aef9,citation,http://pdfs.semanticscholar.org/871e/6c1de2e0ba86bad8975b8411ad76a6a9aef9.pdf,Geometric Modeling of 3D-Face Features and Its Applications,2010 +521,FERET,feret,37.52914535,45.04886077,Urmia University,edu,8aa85d2f81d7496cf7105ee0a3785f140ddaa367,citation,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2019/PID2859743.pdf,Efficient processing of MRFs for unconstrained-pose face recognition,2013 +522,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,8aa85d2f81d7496cf7105ee0a3785f140ddaa367,citation,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2019/PID2859743.pdf,Efficient processing of MRFs for unconstrained-pose face recognition,2013 +523,FERET,feret,24.7246403,46.62335012,King Saud University,edu,674e739709537f0e562b6cf114f15a5cc57fde7e,citation,http://www.cse.unr.edu/~bebis/CGIV2014.pdf,Nonsubsampled Contourlet Transform Based Descriptors for Gender Recognition,2014 +524,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,062cea54e5d58ee41aea607cbf2ba0cf457aa4e7,citation,http://pdfs.semanticscholar.org/062c/ea54e5d58ee41aea607cbf2ba0cf457aa4e7.pdf,The DIARETDB1 Diabetic Retinopathy Database and Evaluation Protocol,2007 +525,FERET,feret,56.66340325,12.87929727,Halmstad University,edu,555f75077a02f33a05841f9b63a1388ec5fbcba5,citation,https://arxiv.org/pdf/1810.03360.pdf,A Survey on Periocular Biometrics Research,2016 +526,FERET,feret,40.7423025,-74.17928172,New Jersey Institute of Technology,edu,892db59add66fc581ae1a7338ff8bd6b7aa0f2b4,citation,http://pdfs.semanticscholar.org/892d/b59add66fc581ae1a7338ff8bd6b7aa0f2b4.pdf,FPGA-based Normalization for Modified Gram-Schmidt Orthogonalization,2010 +527,FERET,feret,-34.9189226,138.60423668,University of Adelaide,edu,019f1462c1b7101100334e4c421d35feea612492,citation,http://pdfs.semanticscholar.org/019f/1462c1b7101100334e4c421d35feea612492.pdf,Running Head : UNFAMILIAR FACE MATCHING The Effects of External Features and Time 
Pressure on Unfamiliar Face Matching,2006 +528,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,10c79df4f44b5e4c08f984f34370d292f31ef309,citation,http://pdfs.semanticscholar.org/10c7/9df4f44b5e4c08f984f34370d292f31ef309.pdf,Multi-Modal 2D and 3D Biometrics for Face Recognition,2003 +529,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,3514f66f155c271981a734f1523572edcd8fd10e,citation,http://www.umiacs.umd.edu/~jhchoi/paper/wacv2012_slide.pdf,A complementary local feature descriptor for face identification,2012 +530,FERET,feret,-27.5953995,-48.6154218,University of Campinas,edu,3514f66f155c271981a734f1523572edcd8fd10e,citation,http://www.umiacs.umd.edu/~jhchoi/paper/wacv2012_slide.pdf,A complementary local feature descriptor for face identification,2012 +531,FERET,feret,40.5709358,-105.08655256,Colorado State University,edu,aa4d1ad6fd2dbc05139b8121b500c2b1f6b35bec,citation,http://pdfs.semanticscholar.org/aa4d/1ad6fd2dbc05139b8121b500c2b1f6b35bec.pdf,Grassmann Registration Manifolds for Face Recognition,2008 +532,FERET,feret,51.24303255,-0.59001382,University of Surrey,edu,c79cf7f61441195404472102114bcf079a72138a,citation,https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf,Pose-Invariant 2 D Face Recognition by Matching Using Graphical Models,2010 +533,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,1acf8970598bb2443fd2dd42ceeca1eb3f2fc613,citation,https://pdfs.semanticscholar.org/1acf/8970598bb2443fd2dd42ceeca1eb3f2fc613.pdf,Boosting Statistical Local Feature Based Classifiers for Face Recognition,2005 +534,FERET,feret,46.0658836,11.1159894,University of Trento,edu,a489a7951c7848ebae5a99ac590c016359a85434,citation,https://arxiv.org/pdf/1901.09774.pdf,Attribute-Guided Sketch Generation,2019 +535,FERET,feret,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,a489a7951c7848ebae5a99ac590c016359a85434,citation,https://arxiv.org/pdf/1901.09774.pdf,Attribute-Guided Sketch Generation,2019 +536,FERET,feret,51.7534538,-1.25400997,University of Oxford,edu,a489a7951c7848ebae5a99ac590c016359a85434,citation,https://arxiv.org/pdf/1901.09774.pdf,Attribute-Guided Sketch Generation,2019 +537,FERET,feret,42.2942142,-83.71003894,University of Michigan,edu,a489a7951c7848ebae5a99ac590c016359a85434,citation,https://arxiv.org/pdf/1901.09774.pdf,Attribute-Guided Sketch Generation,2019 +538,FERET,feret,52.2380139,6.8566761,University of Twente,edu,0b55b31765f101535eac0d50b9da377f82136d2f,citation,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/163.pdf,Biometric binary string generation with detection rate optimized bit allocation,2008 +539,FERET,feret,42.4505507,-76.4783513,Cornell University,edu,bd7477c250f01f63f438c4f3bebe374caf4b86ba,citation,http://pdfs.semanticscholar.org/bd74/77c250f01f63f438c4f3bebe374caf4b86ba.pdf,Real-time Face and Hand Detection for Videoconferencing on a Mobile Device,2009 +540,FERET,feret,39.9808333,116.34101249,Beihang University,edu,9039b8097a78f460db9718bc961fdc7d89784092,citation,http://pdfs.semanticscholar.org/9039/b8097a78f460db9718bc961fdc7d89784092.pdf,3D Face Recognition Based on Local Shape Patterns and Sparse Representation Classifier,2011 +541,FERET,feret,37.5600406,126.9369248,Yonsei University,edu,ee458bee26e6371f9347b1972bbc9dc26b2f3713,citation,https://arxiv.org/pdf/1703.01396.pdf,Stacking-based deep neural network: Deep analytic network on convolutional spectral histogram features,2017 +542,FERET,feret,23.09461185,113.28788994,Sun Yat-Sen 
University,edu,80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,citation,https://doi.org/10.1109/TNNLS.2016.2522431,Learning Kernel Extended Dictionary for Face Recognition,2017 +543,FERET,feret,-27.49741805,153.01316956,University of Queensland,edu,2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc,citation,http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf,Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference,2009 +544,FERET,feret,45.7597643,3.1310213,"VESALIS SAS, France",company,778c1e95b6ea4ccf89067b83364036ab08797256,citation,https://doi.org/10.1109/TIFS.2012.2224866,Exploring Patterns of Gradient Orientations and Magnitudes for Face Recognition,2013 +545,FERET,feret,-27.49741805,153.01316956,University of Queensland,edu,b9504e4a2f40f459b5e83143e77f4972c7888445,citation,http://conradsanderson.id.au/pdfs/chen_avss_2008.pdf,Experimental Analysis of Face Recognition on Still and CCTV Images,2008 +546,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,1da1299088a6bf28167c58bbd46ca247de41eb3c,citation,https://doi.org/10.1109/ICASSP.2002.5745055,Face identification from a single example image based on Face-Specific Subspace (FSS),2002 +547,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,1da1299088a6bf28167c58bbd46ca247de41eb3c,citation,https://doi.org/10.1109/ICASSP.2002.5745055,Face identification from a single example image based on Face-Specific Subspace (FSS),2002 +548,FERET,feret,31.30104395,121.50045497,Fudan University,edu,3ca25a9e906b851df01a53f4443d66978a0243b8,citation,http://pdfs.semanticscholar.org/3ca2/5a9e906b851df01a53f4443d66978a0243b8.pdf,Improved Super-Resolution through Residual Neighbor Embedding,2006 +549,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,3ca25a9e906b851df01a53f4443d66978a0243b8,citation,http://pdfs.semanticscholar.org/3ca2/5a9e906b851df01a53f4443d66978a0243b8.pdf,Improved Super-Resolution through Residual Neighbor Embedding,2006 +550,FERET,feret,37.5600406,126.9369248,Yonsei University,edu,64fd48fae4d859583c4a031b51ce76ecb5de614c,citation,https://doi.org/10.1109/ICARCV.2008.4795556,Illuminated face normalization technique by using wavelet fusion and local binary patterns,2008 +551,FERET,feret,2.92749755,101.64185301,Multimedia University,edu,64fd48fae4d859583c4a031b51ce76ecb5de614c,citation,https://doi.org/10.1109/ICARCV.2008.4795556,Illuminated face normalization technique by using wavelet fusion and local binary patterns,2008 +552,FERET,feret,39.9808333,116.34101249,Beihang University,edu,70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e,citation,http://doi.org/10.1007/s11042-018-5608-2,Elastic preserving projections based on L1-norm maximization,2018 +553,FERET,feret,56.66340325,12.87929727,Halmstad University,edu,9cda3e56cec21bd8f91f7acfcefc04ac10973966,citation,https://doi.org/10.1109/IWBF.2016.7449688,"Periocular biometrics: databases, algorithms and directions",2016 +554,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,569008018f0b9c4abb8b5c662a6710a1fc38b5a6,citation,http://pdfs.semanticscholar.org/5690/08018f0b9c4abb8b5c662a6710a1fc38b5a6.pdf,Face Similarity Space as Perceived by Humans and Artificial Systems,1998 +555,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,569008018f0b9c4abb8b5c662a6710a1fc38b5a6,citation,http://pdfs.semanticscholar.org/5690/08018f0b9c4abb8b5c662a6710a1fc38b5a6.pdf,Face Similarity Space as Perceived by Humans and Artificial Systems,1998 +556,FERET,feret,52.22165395,21.00735776,Warsaw University of 
Technology,edu,76dff7008d9b8bf44ec5348f294d5518877c6182,citation,https://doi.org/10.1016/j.imavis.2014.09.004,Discrete area filters in accurate detection of faces and facial features,2014 +557,FERET,feret,39.5469449,-119.81346566,University of Nevada,edu,4bc2352b087bdc99ef5f00453e5d2272d522524c,citation,http://pdfs.semanticscholar.org/4bc2/352b087bdc99ef5f00453e5d2272d522524c.pdf,Investigating the Impact of Face Categorization on Recognition Performance,2005 +558,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,6250781bb606041fdc1621ba08aee541bfb1285b,citation,http://www.cse.nd.edu/Reports/2004/TR-2004-31.pdf,Ear Biometrics Using 2D and 3D Images,2005 +559,FERET,feret,13.65450525,100.49423171,Robotics Institute,edu,f6fa68847e0ce7fda05a9c73ebcb484f0b42a9af,citation,http://pdfs.semanticscholar.org/f6fa/68847e0ce7fda05a9c73ebcb484f0b42a9af.pdf,Face Recognition Across Pose and Illumination,2011 +560,FERET,feret,45.5039761,-73.5749687,McGill University,edu,3a34c622c1af4b181e99d4a58f7870314944d2c4,citation,http://pdfs.semanticscholar.org/3a34/c622c1af4b181e99d4a58f7870314944d2c4.pdf,D View - Invariant Face Recognition Using a Hierarchical Pose - Normalization Strategy,2005 +561,FERET,feret,38.99203005,-76.9461029,University of Maryland College Park,edu,ece80165040e9d8304c5dd808a6cdb29c8ecbf5b,citation,https://pdfs.semanticscholar.org/a2f6/8e5898364ac7c1d4691d23fab716ad672712.pdf,Looking at People Using Partial Least Squares,2010 +562,FERET,feret,53.21967825,6.56251482,University of Groningen,edu,ae1de0359f4ed53918824271c888b7b36b8a5d41,citation,http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf,Low-cost Automatic Inpainting for Artifact Suppression in Facial Images,2013 +563,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,1e46d0714398904e557f27022908121fa8a7902f,citation,http://pdfs.semanticscholar.org/1e46/d0714398904e557f27022908121fa8a7902f.pdf,Baseline Evaluations on the CAS-PEAL-R1 Face Database,2004 +564,FERET,feret,42.357757,-83.06286711,Wayne State University,edu,bec31269632c17206deb90cd74367d1e6586f75f,citation,http://pdfs.semanticscholar.org/bec3/1269632c17206deb90cd74367d1e6586f75f.pdf,Large-scale Datasets: Faces with Partial Occlusions and Pose Variations in the Wild,2017 +565,FERET,feret,53.27639715,-9.05829961,National University of Ireland Galway,edu,e08038b14165536c52ffe950d90d0f43be9c8f15,citation,https://arxiv.org/pdf/1703.08383.pdf,Smart Augmentation Learning an Optimal Data Augmentation Strategy,2017 +566,FERET,feret,24.7246403,46.62335012,King Saud University,edu,edf01e1c84e2f80500fd74da69f428617f2a1665,citation,http://www.cse.unr.edu/~bebis/IWSSIP2013.pdf,Gender recognition from faces using bandlet and local binary patterns,2013 +567,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,6b3e360b80268fda4e37ff39b7f303e3684e8719,citation,,FACE RECOGNITION FROM SKETCHES USING ADVANCED CORRELATION FILTERS USING HYBRID EIGENANALYSIS FOR FACE SYNTHESIS,2006 +568,FERET,feret,33.776033,-84.39884086,Georgia Institute of Technology,edu,987feaa36f3bb663ac9fa767718c6a90ea0dab3f,citation,https://pdfs.semanticscholar.org/987f/eaa36f3bb663ac9fa767718c6a90ea0dab3f.pdf,A Distributed System for Supporting Spatio-temporal Analysis on Large-scale Camera Networks,2012 +569,FERET,feret,48.9095338,9.1831892,University of Stuttgart,edu,987feaa36f3bb663ac9fa767718c6a90ea0dab3f,citation,https://pdfs.semanticscholar.org/987f/eaa36f3bb663ac9fa767718c6a90ea0dab3f.pdf,A Distributed System for Supporting Spatio-temporal Analysis 
on Large-scale Camera Networks,2012 +570,FERET,feret,42.9336278,-78.88394479,SUNY Buffalo,edu,987feaa36f3bb663ac9fa767718c6a90ea0dab3f,citation,https://pdfs.semanticscholar.org/987f/eaa36f3bb663ac9fa767718c6a90ea0dab3f.pdf,A Distributed System for Supporting Spatio-temporal Analysis on Large-scale Camera Networks,2012 +571,FERET,feret,-33.3578899,151.37834708,University of Newcastle,edu,2feb7c57d51df998aafa6f3017662263a91625b4,citation,https://pdfs.semanticscholar.org/d344/9eaaf392fd07b676e744410049f4095b4b5c.pdf,Feature Selection for Intelligent Transportation Systems,2014 +572,FERET,feret,22.1240187,113.54510901,University of Macau,edu,c3558f67b3f4b618e6b53ce844faf38240ee7cd7,citation,https://arxiv.org/pdf/1802.07589.pdf,Collaboratively Weighting Deep and Classic Representation via $l_2$ Regularization for Image Classification,2018 +573,FERET,feret,50.89273635,-1.39464295,University of Southampton,edu,c3558f67b3f4b618e6b53ce844faf38240ee7cd7,citation,https://arxiv.org/pdf/1802.07589.pdf,Collaboratively Weighting Deep and Classic Representation via $l_2$ Regularization for Image Classification,2018 +574,FERET,feret,32.20302965,119.50968362,Jiangsu University,edu,c3558f67b3f4b618e6b53ce844faf38240ee7cd7,citation,https://arxiv.org/pdf/1802.07589.pdf,Collaboratively Weighting Deep and Classic Representation via $l_2$ Regularization for Image Classification,2018 +575,FERET,feret,35.9990522,-78.9290629,Duke University,edu,a7678cce6bfca4a34feee5564c87c80fe192a0fd,citation,http://pdfs.semanticscholar.org/a767/8cce6bfca4a34feee5564c87c80fe192a0fd.pdf,The Weakly Identifying System for Doorway Monitoring,2007 +576,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,550289407a642e81e1ef9dc0476117ed7816e9b5,citation,http://pdfs.semanticscholar.org/5502/89407a642e81e1ef9dc0476117ed7816e9b5.pdf,Conditional Infomax Learning: An Integrated Framework for Feature Extraction and Fusion,2006 +577,FERET,feret,39.977217,116.337632,Microsoft Research Asia,company,550289407a642e81e1ef9dc0476117ed7816e9b5,citation,http://pdfs.semanticscholar.org/5502/89407a642e81e1ef9dc0476117ed7816e9b5.pdf,Conditional Infomax Learning: An Integrated Framework for Feature Extraction and Fusion,2006 +578,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,6577d30abd8bf5b21901572504bd82101a7eed75,citation,http://pdfs.semanticscholar.org/6577/d30abd8bf5b21901572504bd82101a7eed75.pdf,Ear Biometrics in Human,2006 +579,FERET,feret,45.42580475,-75.68740118,University of Ottawa,edu,65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,citation,http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf,"Face , Age and Gender Recognition using Local Descriptors",2014 +580,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255,Simultaneous Local Binary Feature Learning and Encoding for Homogeneous and Heterogeneous Face Recognition,2018 +581,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255,Simultaneous Local Binary Feature Learning and Encoding for Homogeneous and Heterogeneous Face Recognition,2018 +582,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,279acfde0286bb76dd7717abebc3c8acf12d2c5f,citation,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICPR2014/Lei-ICPR-14.pdf,Local Gradient Order Pattern for Face Representation and 
Recognition,2014 +583,FERET,feret,22.3874201,114.2082222,Hong Kong Baptist University,edu,17f472a7cb25bf1e76ff29181b1d40585e2ae5c1,citation,https://doi.org/10.1109/BTAS.2015.7358764,Fusing binary templates for multi-biometric cryptosystems,2015 +584,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,cf671dc13696d1643cc1f32f7d32c329b16cd745,citation,http://pdfs.semanticscholar.org/cf67/1dc13696d1643cc1f32f7d32c329b16cd745.pdf,Multiple Fisher Classifiers Combination for Face Recognition based on Grouping AdaBoosted Gabor Features,2005 +585,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,cf671dc13696d1643cc1f32f7d32c329b16cd745,citation,http://pdfs.semanticscholar.org/cf67/1dc13696d1643cc1f32f7d32c329b16cd745.pdf,Multiple Fisher Classifiers Combination for Face Recognition based on Grouping AdaBoosted Gabor Features,2005 +586,FERET,feret,42.718568,-84.47791571,Michigan State University,edu,b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,citation,http://doi.org/10.1007/s11263-017-1012-z,Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting,2017 +587,FERET,feret,52.9387428,-1.20029569,University of Nottingham,edu,9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03,citation,https://doi.org/10.1007/s10044-006-0033-y,A review on Gabor wavelets for face recognition,2006 +588,FERET,feret,35.9542493,-83.9307395,University of Tennessee,edu,d103df0381582003c7a8930b68047b4f26d9b613,citation,http://pdfs.semanticscholar.org/d103/df0381582003c7a8930b68047b4f26d9b613.pdf,Quality Assessment and Restoration of Face Images in Long Range/High Zoom Video,2006 +589,FERET,feret,13.0222347,77.56718325,Indian Institute of Science Bangalore,edu,56fb30b24e7277b47d366ca2c491749eee4d6bb1,citation,https://doi.org/10.1109/ICAPR.2015.7050658,Using Bayesian statistics and Gabor Wavelets for recognition of human faces,2015 +590,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,963a004e208ce4bd26fa79a570af61d31651b3c3,citation,https://doi.org/10.1016/j.jvlc.2009.01.011,Computational methods for modeling facial aging: A survey,2009 +591,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,4a18adc7f5a090a041528a88166671248703f6e0,citation,http://pdfs.semanticscholar.org/c2c3/ecd39dd24e2b57ae6023536cc1fcd29d184a.pdf,Illumination Normalization for Robust Face Recognition Against Varying Lighting Conditions,2003 +592,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,4a18adc7f5a090a041528a88166671248703f6e0,citation,http://pdfs.semanticscholar.org/c2c3/ecd39dd24e2b57ae6023536cc1fcd29d184a.pdf,Illumination Normalization for Robust Face Recognition Against Varying Lighting Conditions,2003 +593,FERET,feret,-31.95040445,115.79790037,University of Western Australia,edu,301662c2a6ed86e48f21c1d24bfc67b403201b0c,citation,http://pdfs.semanticscholar.org/688d/0dddf90995ba6248de148e58030cb8f558e8.pdf,Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits,2015 +594,FERET,feret,52.17638955,0.14308882,University of Cambridge,edu,301662c2a6ed86e48f21c1d24bfc67b403201b0c,citation,http://pdfs.semanticscholar.org/688d/0dddf90995ba6248de148e58030cb8f558e8.pdf,Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits,2015 +595,FERET,feret,28.2290209,112.99483204,"National University of Defense Technology, China",edu,c48b2582429cc9ae427a264eed469d08b571acde,citation,https://pdfs.semanticscholar.org/c48b/2582429cc9ae427a264eed469d08b571acde.pdf,Facial Peculiarity Retrieval via 
Deep Neural Networks Fusion,2018 +596,FERET,feret,40.5709358,-105.08655256,Colorado State University,edu,878ec66a3bb87f23f3f8fd96ee504f79e6100a95,citation,https://pdfs.semanticscholar.org/878e/c66a3bb87f23f3f8fd96ee504f79e6100a95.pdf,THESIS EVALUATING THE PERFORMANCE OF IPHOTO FACIAL RECOGNITION AT THE BIOMETRIC VERIFICATION TASK,2012 +597,FERET,feret,41.70456775,-86.23822026,University of Notre Dame,edu,124f6992202777c09169343d191c254592e4428c,citation,https://arxiv.org/pdf/1803.07140.pdf,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,2018 +598,FERET,feret,42.36782045,-71.12666653,Harvard University,edu,124f6992202777c09169343d191c254592e4428c,citation,https://arxiv.org/pdf/1803.07140.pdf,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,2018 +599,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,4156f9fc5983b09eb97ad3d9abc248b15440b955,citation,http://pdfs.semanticscholar.org/4156/f9fc5983b09eb97ad3d9abc248b15440b955.pdf,"2 Subspace Methods for Face Recognition : Singularity , Regularization , and Robustness",2012 +600,FERET,feret,64.137274,-21.94561454,University of Iceland,edu,533d70c914a4b84ec7f35ef6c74bb3acba4c26fc,citation,http://pdfs.semanticscholar.org/533d/70c914a4b84ec7f35ef6c74bb3acba4c26fc.pdf,Blaming the victims of your own mistakes: How visual search accuracy influences evaluation of stimuli.,2015 +601,FERET,feret,51.5231607,-0.1282037,University College London,edu,533d70c914a4b84ec7f35ef6c74bb3acba4c26fc,citation,http://pdfs.semanticscholar.org/533d/70c914a4b84ec7f35ef6c74bb3acba4c26fc.pdf,Blaming the victims of your own mistakes: How visual search accuracy influences evaluation of stimuli.,2015 +602,FERET,feret,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c,citation,http://pdfs.semanticscholar.org/ccd7/a6b9f23e983a3fc6a70cc3b9c9673d70bf2c.pdf,Symmetrical Two-Dimensional PCA with Image Measures in Face Recognition,2012 +603,FERET,feret,35.9113971,-79.0504529,University of North Carolina at Chapel Hill,edu,60a006bdfe5b8bf3243404fae8a5f4a9d58fa892,citation,http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf,A reference-based framework for pose invariant face recognition,2015 +604,FERET,feret,32.0565957,118.77408833,Nanjing University,edu,19fed85436eff43e60b9476e3d8742dfedba6384,citation,http://pdfs.semanticscholar.org/19fe/d85436eff43e60b9476e3d8742dfedba6384.pdf,A Novel Multiple Kernel Sparse Representation based Classification for Face Recognition,2014 +605,FERET,feret,34.80809035,135.45785218,Osaka University,edu,244c5f88186475bc3b051be8ebb6422e4b8de707,citation,http://www.am.sanken.osaka-u.ac.jp/~mansur/files/cvpr2012.pdf,Video from nearly still: An application to low frame-rate gait recognition,2012 +606,FERET,feret,40.00229045,116.32098908,Tsinghua University,edu,977bedd692c240c162481ef769b31e0f5455469a,citation,http://pdfs.semanticscholar.org/977b/edd692c240c162481ef769b31e0f5455469a.pdf,A Two-Step Approach to Hallucinating Faces: Global Parametric Model and Local Nonparametric Model,2001 +607,FERET,feret,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,9e1c3c7f1dce662a877727a821bdf41c5cd906bb,citation,http://pdfs.semanticscholar.org/9e1c/3c7f1dce662a877727a821bdf41c5cd906bb.pdf,Learning Disentangling and Fusing Networks for Face Completion Under Structured Occlusions,2017 +608,FERET,feret,42.718568,-84.47791571,Michigan State 
University,edu,4308f53244bbb6a1e22ba1d39e079e5065a51364,citation,http://pdfs.semanticscholar.org/4308/f53244bbb6a1e22ba1d39e079e5065a51364.pdf,Ethnicity Identification from Face Images,2004 +609,FERET,feret,33.30715065,-111.67653157,Arizona State University,edu,5b1f3a60518c3a552de09ed51646764551f4cb84,citation,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/121.pdf,Multiple cue integration in transductive confidence machines for head pose classification,2008 +610,FERET,feret,3.06405715,101.6005974,Monash University Malaysia,edu,a96a7a381872ae40179ded0d79f905da0455d9d1,citation,http://pdfs.semanticscholar.org/a96a/7a381872ae40179ded0d79f905da0455d9d1.pdf,Segmentation of Saimaa Ringed Seals for Identification Purposes,2015 +611,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,9a7fcd09afd8c3ae227e621795168c94ffbac71d,citation,http://mplab.ucsd.edu/wp-content/uploads/2011-WuEtAl-FERA-DatasetTransfer.pdf,Action unit recognition transfer across datasets,2011 +612,FERET,feret,40.0044795,116.370238,Chinese Academy of Sciences,edu,f2d813a987f0aed5056d5eccbadee8738bbd0a4b,citation,http://pdfs.semanticscholar.org/f2d8/13a987f0aed5056d5eccbadee8738bbd0a4b.pdf,Fast Matching by 2 Lines of Code for Large Scale Face Recognition Systems,2013 +613,FERET,feret,51.44415765,7.26096541,Ruhr-University Bochum,edu,8489236bbbb3298f4513c7e005a85ba7a48cc946,citation,http://pdfs.semanticscholar.org/8489/236bbbb3298f4513c7e005a85ba7a48cc946.pdf,Vision and Touch for Grasping,2000 +614,FERET,feret,1.3484104,103.68297965,Nanyang Technological University,edu,1dede3e0f2e0ed2984aca8cd98631b43c3f887b9,citation,http://www3.ntu.edu.sg/home/EXDJiang/ICASSP13-3.pdf,A vote of confidence based interest point detector,2013 +615,FERET,feret,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,4c56f119ebf7c71f2a83e4d79e8d88314b8e6044,citation,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=906254,An other-race effect for face recognition algorithms,2011 +616,FERET,feret,32.9820799,-96.7566278,University of Texas at Dallas,edu,4c56f119ebf7c71f2a83e4d79e8d88314b8e6044,citation,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=906254,An other-race effect for face recognition algorithms,2011 +617,FERET,feret,22.42031295,114.20788644,Chinese University of Hong Kong,edu,1b67053c682dcbc9dc368de89fff32f787320a96,citation,http://mmlab.ie.cuhk.edu.hk/archive/2007/CVPR07_face01.pdf,Quality-Driven Face Occlusion Detection and Recovery,2007 +618,FERET,feret,46.0501558,14.46907327,University of Ljubljana,edu,86274e426bfe962d5cb994d5d9c6829f64410c32,citation,http://pdfs.semanticscholar.org/8627/4e426bfe962d5cb994d5d9c6829f64410c32.pdf,Face Recognition in Different Subspaces: A Comparative Study,2006 +619,FERET,feret,40.8419836,-73.94368971,Columbia University,edu,4c170a0dcc8de75587dae21ca508dab2f9343974,citation,http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf,FaceTracer: A Search Engine for Large Collections of Images with Faces,2008 +620,FERET,feret,39.2899685,-76.62196103,University of Maryland,edu,4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99,citation,http://pdfs.semanticscholar.org/4276/eb27e2e4fc3e0ceb769eca75e3c73b7f2e99.pdf,Face Recognition From Video,2008 +621,FERET,feret,45.7413921,126.62552755,Harbin Institute of Technology,edu,63f9f3f0e1daede934d6dde1a84fb7994f8929f0,citation,http://www.jdl.ac.cn/user/sgshan/pub/ICCV2005-ZhangShan-LGBP.pdf,Local Gabor binary pattern histogram sequence (LGBPHS): a novel non-statistical model for face 
representation and recognition,2005 +622,FERET,feret,37.4102193,-122.05965487,Carnegie Mellon University,edu,39e1fb5539737a17ae5fc25de30377dfaecfa100,citation,https://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2004_1/gross_ralph_2004_1.pdf,Appearance-based face recognition and light-fields,2004 +623,FERET,feret,13.0105838,80.2353736,Anna University,edu,e19ba2a6ce70fb94d31bb0b39387aa734e6860b0,citation,http://pdfs.semanticscholar.org/e19b/a2a6ce70fb94d31bb0b39387aa734e6860b0.pdf,A Different Approach to Appearance –based Statistical Method for Face Recognition Using Median,2007 +624,FERET,feret,32.87935255,-117.23110049,"University of California, San Diego",edu,528a6698911ff30aa648af4d0a5cf0dd9ee90b5c,citation,https://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf,Is All Face Processing Holistic ? The View from UCSD,2003 +625,FERET,feret,41.6659,-91.57310307,University of Iowa,edu,528a6698911ff30aa648af4d0a5cf0dd9ee90b5c,citation,https://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf,Is All Face Processing Holistic ? The View from UCSD,2003 +626,FERET,feret,35.9023226,14.4834189,University of Malta,edu,4c5566d4cb47f4db45d46c6aaf324d6057b580bc,citation,http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738068,Gender recognition from face images with trainable COSFIRE filters,2016 +627,FERET,feret,40.5709358,-105.08655256,Colorado State University,edu,462fe97ce53e58c8e2cb01c925b46bcf3bb53eda,citation,http://www.cs.colostate.edu/~draper/papers/givens_cvpr04.pdf,How features of the human face affect recognition: a statistical comparison of three face recognition algorithms,2004 +628,FERET,feret,61.44964205,23.85877462,Tampere University of Technology,edu,c95e379aab32a1611f1f549fd11a3e9498ab5dae,citation,http://pdfs.semanticscholar.org/c95e/379aab32a1611f1f549fd11a3e9498ab5dae.pdf,Constructing Benchmark Databases and Protocols for Medical Image Analysis: Diabetic Retinopathy,2013 +629,FERET,feret,61.49412325,23.77920678,University of Tampere,edu,c95e379aab32a1611f1f549fd11a3e9498ab5dae,citation,http://pdfs.semanticscholar.org/c95e/379aab32a1611f1f549fd11a3e9498ab5dae.pdf,Constructing Benchmark Databases and Protocols for Medical Image Analysis: Diabetic Retinopathy,2013 +630,FERET,feret,52.4107358,-4.05295501,Aberystwyth University,edu,9264b390aa00521f9bd01095ba0ba4b42bf84d7e,citation,http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf,Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches,2012 +631,FERET,feret,53.8925662,-122.81471592,University of Northern British Columbia,edu,9264b390aa00521f9bd01095ba0ba4b42bf84d7e,citation,http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf,Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches,2012 +632,FERET,feret,40.7423025,-74.17928172,New Jersey Institute of Technology,edu,5bb9540375ba9bba22f8a22ba2990cfe7ff6780c,citation,http://pdfs.semanticscholar.org/5bb9/540375ba9bba22f8a22ba2990cfe7ff6780c.pdf,Discriminant Analysis of Haar Features for Accurate Eye Detection,2011 +633,FERET,feret,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,01a19d3e902d7431f533f5f0b54510a7fb9bda23,citation,http://pdfs.semanticscholar.org/3521/15bbb399b94865a7d870d1cd1a79e42104b8.pdf,A Practical Face Relighting Method for Directional Lighting Normalization,2005 +634,FERET,feret,42.718568,-84.47791571,Michigan State 
University,edu,022f38febc47818a010dc64ca54f6e137055cc88,citation,http://biometrics.cse.msu.edu/Publications/Face/HanJain_3DFaceTextureModeling_UncalibratedFrontalProfileImages_BTAS12.pdf,3D face texture modeling from uncalibrated frontal and profile images,2012 +635,FERET,feret,34.0224149,-118.28634407,University of Southern California,edu,b13014374863715c421ed92d3827fc7e09a3e47a,citation,https://pdfs.semanticscholar.org/fe31/8312fd51fc65d132084c3862c85f067e6edf.pdf,Rapid Correspondence Finding in Networks of Cortical Columns,2006 +636,FERET,feret,44.97308605,-93.23708813,University of Minnesota,edu,8a55c385c8cf76cadaa28c7ab1fde9dc28577b08,citation,http://www-users.cs.umn.edu/~boley/publications/papers/ICCV2011.pdf,Positive definite dictionary learning for region covariances,2011 +637,FERET,feret,30.3125525,120.3430946,Hangzhou Dianzi University,edu,d40cd10f0f3e64fd9b0c2728089e10e72bea9616,citation,http://pdfs.semanticscholar.org/d40c/d10f0f3e64fd9b0c2728089e10e72bea9616.pdf,Enhancing Face Identification Using Local Binary Patterns and K-Nearest Neighbors,2017 diff --git a/site/datasets/final/feret.json b/site/datasets/final/feret.json index a6584c3b..27cffaec 100644 --- a/site/datasets/final/feret.json +++ b/site/datasets/final/feret.json @@ -1 +1 @@ -{"id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "paper": {"paper_id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "key": "feret", "title": "The FERET Verification Testing Protocol for Face Recognition Algorithms", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf", "address": "", "name": "FERET"}, "address": null, "additional_papers": [{"paper_id": "dc8b25e35a3acb812beb499844734081722319b4", "key": "feret", "title": "The FERET Promising Research database and evaluation procedure for face - recognition algorithms", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf", "address": "", "name": "FERET"}, {"paper_id": "31de9b3dd6106ce6eec9a35991b2b9083395fd0b", "key": "feret", "title": "FERET (Face Recognition Technology) Recognition Algorithm Development and Test Results", "year": 1996, "pdf": "http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf", "address": "", "name": "FERET"}, {"paper_id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "key": "feret", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf", "address": "", "name": "FERET"}], "citations": [{"id": "919d0e681c4ef687bf0b89fe7c0615221e9a1d30", "title": "Fractal Techniques for Face Recognition", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/919d/0e681c4ef687bf0b89fe7c0615221e9a1d30.pdf"}, {"id": "51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6", "title": "A Survey of Face Detection, Extraction and Recognition", "addresses": [{"address": "Huazhong University of Science and Technology", "lat": "30.50975370", "lng": "114.40628810", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/51ed/4c92cab9336a2ac41fa8e0293c2f5f9bf3b6.pdf"}, {"id": "8aff9c8a0e17be91f55328e5be5e94aea5227a35", "title": "Sparse Tensor Discriminant Color Space for Face Verification", "addresses": [{"address": "Jilin University", "lat": "22.05356500", "lng": "113.39913285", "type": "edu"}, {"address": "Raytheon BBN Technologies", 
"lat": "42.38980550", "lng": "-71.14759860", "type": "company"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/TNNLS.2012.2191620"}, {"id": "d3b5a52062e5f5415df527705cb24af9b0846617", "title": "Advances and Challenges in 3D and 2D+3D Human Face Recognition", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/d3b5/a52062e5f5415df527705cb24af9b0846617.pdf"}, {"id": "03167776e17bde31b50f294403f97ee068515578", "title": "Chapter 11. Facial Expression Analysis", "addresses": [{"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/0316/7776e17bde31b50f294403f97ee068515578.pdf"}, {"id": "658eb1fd14808d10e0f4fee99c5506a1bb0e351a", "title": "Multi-Discriminant Classification Algorithm for Face Verification", "addresses": [{"address": "National Cheng Kung University", "lat": "22.99919160", "lng": "120.21625134", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/658e/b1fd14808d10e0f4fee99c5506a1bb0e351a.pdf"}, {"id": "49570b41bd9574bd9c600e24b269d945c645b7bd", "title": "A Framework for Performance Evaluation of Face Recognition Algorithms", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf"}, {"id": "d65b82b862cf1dbba3dee6541358f69849004f30", "title": "2.5D Elastic graph matching", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/d65b/82b862cf1dbba3dee6541358f69849004f30.pdf"}, {"id": "6d5e12ee5d75d5f8c04a196dd94173f96dc8603f", "title": "Learning a similarity metric discriminatively, with application to face verification", "addresses": [{"address": "Courant Institute of Mathematical Sciences", "lat": "40.72864840", "lng": "-73.99568630", "type": "edu"}, {"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2005, "pdf": "http://www.cs.toronto.edu/~hinton/csc2535_06/readings/chopra-05.pdf"}, {"id": "0e1403f2182609fb64ed72913f7294fea7d02bd6", "title": "Learning Support Vectors for Face Verification and Recognition", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/9457/cdb4b1f4764f70fe86b50e26abc34930f882.pdf"}, {"id": "fe9a6a93af9c32f6b0454a7cf6897409124514bd", "title": "Designing a smart card face verification system", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/fe9a/6a93af9c32f6b0454a7cf6897409124514bd.pdf"}, {"id": "92a3d5ab3eb540a11eddf1b836c1db28640b2746", "title": "Face Recognition using 3D Facial Shape and Color Map Information: Comparison and Combination", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/92a3/d5ab3eb540a11eddf1b836c1db28640b2746.pdf"}, 
{"id": "23fc83c8cfff14a16df7ca497661264fc54ed746", "title": "Comprehensive Database for Facial Expression Analysis", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "aec46facf3131a5be4fc23db4ebfb5514e904ae3", "title": "Audio to the rescue", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/aec4/6facf3131a5be4fc23db4ebfb5514e904ae3.pdf"}, {"id": "544c06584c95bfdcafbd62e04fb796e575981476", "title": "Human Identification from Body Shape", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/544c/06584c95bfdcafbd62e04fb796e575981476.pdf"}, {"id": "84a74ef8680b66e6dccbc69ae80321a52780a68e", "title": "Facial Expression Recognition", "addresses": [{"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": "2011", "pdf": "http://doi.org/10.1007/978-0-85729-932-1_19"}, {"id": "b3cc2554449fb10002250bbc178e1009fc2fdb70", "title": "Face Recognition Based on Local Zernike Moments", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/b3cc/2554449fb10002250bbc178e1009fc2fdb70.pdf"}, {"id": "fbfb0de017d57c5f282050dadb77797d97785ba5", "title": "Enabling EBGM Face Authentication on mobile devices", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/fbfb/0de017d57c5f282050dadb77797d97785ba5.pdf"}, {"id": "0a602b85c80cef7d38209226188aaab94d5349e8", "title": "THE FLORIDA STATE UNIVERSITY COLLEGE OF ARTS AND SCIENCES AUTOMATED FACE TRACKING AND RECOGNITION By MATTHEW", "addresses": [{"address": "Florida State University", "lat": "30.44235995", "lng": "-84.29747867", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/0a60/2b85c80cef7d38209226188aaab94d5349e8.pdf"}, {"id": "a2bcfba155c990f64ffb44c0a1bb53f994b68a15", "title": "The Photoface database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/CVPRW.2011.5981840"}, {"id": "425833b5fe892b00dcbeb6e3975008e9a73a5a72", "title": "A Review of Performance Evaluation for Biometrics Systems", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2005, "pdf": 
"http://pdfs.semanticscholar.org/4258/33b5fe892b00dcbeb6e3975008e9a73a5a72.pdf"}, {"id": "7ef44b7c2b5533d00001ae81f9293bdb592f1146", "title": "D\u00e9tection des \u00e9motions \u00e0 partir de vid\u00e9os dans un environnement non contr\u00f4l\u00e9 Detection of emotions from video in non-controlled environment", "addresses": [{"address": "Aalborg University", "lat": "57.01590275", "lng": "9.97532827", "type": "edu"}], "year": "2003", "pdf": "https://pdfs.semanticscholar.org/7ef4/4b7c2b5533d00001ae81f9293bdb592f1146.pdf"}, {"id": "6e968f74fd6b4b3b172c787f298b3d4746ec5cc9", "title": "A 3D Polygonal Line Chains Matching Method for Face Recognition", "addresses": [{"address": "Griffith University", "lat": "-27.55339750", "lng": "153.05336234", "type": "edu"}], "year": 2013, "pdf": "http://www.ict.griffith.edu.au/~junzhou/papers/C_DICTA_2013_C.pdf"}, {"id": "3a1c3307f57ef09577ac0dc8cd8b090a4fe8091f", "title": "Thermal-to-visible face recognition using partial least squares.", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/3a1c/3307f57ef09577ac0dc8cd8b090a4fe8091f.pdf"}, {"id": "81a8b2e55bcea9d9b26e67fcbb5a30ca8a8defc3", "title": "Database size effects on performance on a smart card face verification system", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2006, "pdf": "http://multispectral-imagery-lab.sandbox.wvu.edu/files/d/337b61b4-b6af-4c96-8314-c282ebebf299/databasesizeeffectsonperformancesmartcardfaceverification.pdf"}, {"id": "b8b0f0ca35cb02334aaa3192559fb35f0c90f8fa", "title": "Face Recognition in Low-resolution Images by Using Local Zernike Moments", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b8b0/f0ca35cb02334aaa3192559fb35f0c90f8fa.pdf"}, {"id": "76d1c6c6b67e67ced1f19a89a5034dafc9599f25", "title": "Understanding OSN-based facial disclosure against face authentication systems", "addresses": [{"address": "Singapore Management University", "lat": "1.29500195", "lng": "103.84909214", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2590296.2590315"}, {"id": "8a12edaf81fd38f81057cf9577c822eb09ff6fc1", "title": "Measuring and mitigating targeted biometric impersonation", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}, {"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/8a12/edaf81fd38f81057cf9577c822eb09ff6fc1.pdf"}, {"id": "4b86e711658003a600666d3ccfa4a9905463df1c", "title": "Fusion of Appearance Image and Passive Stereo Depth Map for Face Recognition Based on the Bilateral 2DLDA", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2007", "pdf": "https://pdfs.semanticscholar.org/4b86/e711658003a600666d3ccfa4a9905463df1c.pdf"}, {"id": "4b8d80f91d271f61b26db5ad627e24e59955c56a", "title": "Learning Long-Range Vision for an Offroad Robot", "addresses": [{"address": "Courant Institute of Mathematical Sciences", "lat": "40.72864840", "lng": "-73.99568630", "type": "edu"}, {"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2008, "pdf": 
"http://pdfs.semanticscholar.org/4b8d/80f91d271f61b26db5ad627e24e59955c56a.pdf"}, {"id": "7af15295224c3ad69d56f17ff635763dd008a8a4", "title": "Learning Support Vectors for Face Authentication: Sensitivity to Mis-Registrations", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7af1/5295224c3ad69d56f17ff635763dd008a8a4.pdf"}, {"id": "5ea51401eea9a50a16bd17471bfd559d2d989760", "title": "Robust Face Alignment Based on Hierarchical Classifier Network", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5ea5/1401eea9a50a16bd17471bfd559d2d989760.pdf"}, {"id": "71644fab2275cfd6a8f770a26aba4e6228e85dec", "title": "Multi-View Discriminant Analysis", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://www.jdl.ac.cn/doc/2011/20131910365517756_2012_eccv_mnkan_mvda.pdf"}, {"id": "280bc9751593897091015aaf2cab39805768b463", "title": "Gender Perception From Faces Using Boosted LBPH (Local Binary Patten Histograms)", "addresses": [{"address": "COMSATS Institute of Information Technology, Lahore", "lat": "31.40063320", "lng": "74.21372960", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf"}, {"id": "23b80dc704e25cf52b5a14935002fc083ce9c317", "title": "Learning Generative Models via Discriminative Approaches", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383035"}, {"id": "857ad04fca2740b016f0066b152bd1fa1171483f", "title": "Sample Images can be Independently Restored from Face Recognition Templates", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/857a/d04fca2740b016f0066b152bd1fa1171483f.pdf"}, {"id": "87b81c8821a2cb9cdf26c75c1531717cab4b942f", "title": "Face Detection with Facial Features and Gender Classification Based On Support Vector Machine", "addresses": [{"address": "Manonmaniam Sundaranar University", "lat": "8.76554685", "lng": "77.65100445", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/87b8/1c8821a2cb9cdf26c75c1531717cab4b942f.pdf"}, {"id": "099ce5cb6f42bff5ad117852d62c5a07e6407b8a", "title": "Spectral Methods for Multi-Scale Feature Extraction and Data Clustering", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/099c/e5cb6f42bff5ad117852d62c5a07e6407b8a.pdf"}, {"id": "21358489b5ce0e94ff37792a8a5eea198e7272f3", "title": "Face Inpainting with Local Linear Representations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/c0cc/2073cad539d979fc6f860177b531b45fafc1.pdf"}, {"id": "dc4e4b9c507e8be2d832faf64e5a2e8887115265", "title": "Face Retrieval Based on Robust Local Features and Statistical-Structural Learning Approach", "addresses": [{"address": "Tampere University of 
Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/dc4e/4b9c507e8be2d832faf64e5a2e8887115265.pdf"}, {"id": "891d435fd1a070bb66225abfd62b2e2c5350e87c", "title": "Selective Feature Generation Method for Classification of Low-dimensional Data", "addresses": [{"address": "Dankook University", "lat": "37.32195750", "lng": "127.12507230", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/891d/435fd1a070bb66225abfd62b2e2c5350e87c.pdf"}, {"id": "854b1f0581f5d3340f15eb79452363cbf38c04c8", "title": "Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648"}, {"id": "cbb55f5885f9a0d0bfaa2c0bf5293ef45a04c5cd", "title": "Performance Characterisation of Face Recognition Algorithms and Their Sensitivity to Severe Illumination Changes", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/cbb5/5f5885f9a0d0bfaa2c0bf5293ef45a04c5cd.pdf"}, {"id": "d8896861126b7fd5d2ceb6fed8505a6dff83414f", "title": "In-plane Rotational Alignment of Faces by Eye and Eye-pair Detection", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf"}, {"id": "e1d1540a718bb7a933e21339f1a2d90660af7353", "title": "Discriminative Probabilistic Latent Semantic Analysis with Application to Single Sample Face Recognition", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11063-018-9852-2"}, {"id": "55498d89f9eb0c9df9760f5e0e47a15ae7e92f25", "title": "Learning-based face hallucination in DCT domain", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2008, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/264.pdf"}, {"id": "f12813073a7f894f82fe2b166893424edba7dc79", "title": "Unified Principal Component Analysis with generalized Covariance Matrix for face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587375"}, {"id": "946c2036c940e77260ade031ba413ec9f2435985", "title": "PCA for Gender Estimation: Which Eigenvectors Contribute?", "addresses": [{"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/946c/2036c940e77260ade031ba413ec9f2435985.pdf"}, {"id": "a129c30b176820bf7f4756b4b4efc92d2a83f190", "title": "Older adults' associative memory is 
modified by manner of presentation at encoding and retrieval.", "addresses": [{"address": "Elon University", "lat": "36.10179560", "lng": "-79.50173300", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a129/c30b176820bf7f4756b4b4efc92d2a83f190.pdf"}, {"id": "e1fac9e9427499d3758213daf1c781b9a42a3420", "title": "Face Image Retrieval Based on Probe Sketch Using SIFT Feature Descriptors", "addresses": [{"address": "National Institute of Technology, Karnataka", "lat": "13.01119095", "lng": "74.79498825", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/7c90/60a809bd28ef61421588f48e33f6eae6ddfd.pdf"}, {"id": "7735f63e5790006cb3d989c8c19910e40200abfc", "title": "Multispectral Imaging For Face Recognition Over Varying Illumination", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/7735/f63e5790006cb3d989c8c19910e40200abfc.pdf"}, {"id": "f909d04c809013b930bafca12c0f9a8192df9d92", "title": "Single Image Subspace for Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/f909/d04c809013b930bafca12c0f9a8192df9d92.pdf"}, {"id": "33abfe693258a4e00467494b11ee4d523379ab6b", "title": "Local Discriminant Embedding with Tensor Representation", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2006, "pdf": "http://www.cse.ust.hk/~dyyeung/paper/pdf/yeung.icip2006a.pdf"}, {"id": "a1997d89f544cc862c63a972ef364b2ff38982e9", "title": "Can SNOMED CT Changes Be Used as a Surrogate Standard for Evaluating the Performance of Its Auditing Methods?", "addresses": [{"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a199/7d89f544cc862c63a972ef364b2ff38982e9.pdf"}, {"id": "aeb64f88302b9d4d23ee13ece5c9842dd43dc37f", "title": "Recollection and confidence in two-alternative forced choice episodic recognition", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/aeb6/4f88302b9d4d23ee13ece5c9842dd43dc37f.pdf"}, {"id": "e392816ec3e0b131bbab06431ac85b14afa7d656", "title": "A Simple and Efficient Supervised Method for Spatially Weighted PCA in Face Image Analysis", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/e392/816ec3e0b131bbab06431ac85b14afa7d656.pdf"}, {"id": "3e76496aa3840bca2974d6d087bfa4267a390768", "title": "Dictionary Learning in Optimal Metric Subspace", "addresses": [{"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}, {"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3e76/496aa3840bca2974d6d087bfa4267a390768.pdf"}, {"id": "355af3c3adbb17d25f0d2a4193e3daadffc0d4e8", "title": "Pattern recognition: Historical perspective and future directions", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": 
"edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/355a/f3c3adbb17d25f0d2a4193e3daadffc0d4e8.pdf"}, {"id": "1e6d1e811da743df02481bca1a7bdaa73b809913", "title": "Multimodal person recognition for human-vehicle interaction", "addresses": [{"address": "Sabanci University", "lat": "40.89271590", "lng": "29.37863323", "type": "edu"}], "year": 2006, "pdf": "http://research.sabanciuniv.edu/608/1/3011800001159.pdf"}, {"id": "f4aafb50c93c5ad3e5c4696ed24b063a1932915a", "title": "What would you look like in Springfield? Linear Transformations between High-Dimensional Spaces", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/f4aa/fb50c93c5ad3e5c4696ed24b063a1932915a.pdf"}, {"id": "10156890bc53cb6be97bd144a68fde693bf13612", "title": "Face Recognition Using Sparse Representation-Based Classification on K-Nearest Subspace", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1015/6890bc53cb6be97bd144a68fde693bf13612.pdf"}, {"id": "16820ccfb626dcdc893cc7735784aed9f63cbb70", "title": "Real-time embedded age and gender classification in unconstrained video", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf"}, {"id": "966b76acfa75253679b1a82ecc5a68e523f5c0c9", "title": "Preference suppression caused by misattribution of task-irrelevant subliminal motion.", "addresses": [{"address": "Boston University", "lat": "42.35042530", "lng": "-71.10056114", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f204/2494d5666e436f5e96ff5e0cd3b5f5e5485b.pdf"}, {"id": "0c7f27d23a162d4f3896325d147f412c40160b52", "title": "Models and Algorithms for Vision through the Atmosphere", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/0c7f/27d23a162d4f3896325d147f412c40160b52.pdf"}, {"id": "6069b4bc1a21341b77b49f01341c238c770d52e0", "title": "Comparing Kernel-based Learning Methods for Face Recognition", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/b02b/50ed995fe526208b1577b9d7ef6262bf3ecf.pdf"}, {"id": "af31ef1e81c1132f186d7aebb141d7f59a815010", "title": "Domain-specific progressive sampling of face images", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "London, United Kingdom", "lat": "51.50732190", "lng": "-0.12764740", "type": "edu"}], "year": 2013, "pdf": "http://cas.ee.ic.ac.uk/people/ccb98/papers/LiuGlobalSIP13.pdf"}, {"id": "07f31bef7a7035792e3791473b3c58d03928abbf", "title": "Lessons from collecting a million biometric samples", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1016/j.imavis.2016.08.004"}, {"id": "d275714c323dd4e400e8003fa8c33070f8ea03d1", "title": "White Fear, Dehumanization and Low Empathy: a 
Lethal Combination for Shooting Biases by Yara Mekawi", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/d275/714c323dd4e400e8003fa8c33070f8ea03d1.pdf"}, {"id": "1a5a79b4937b89420049bc279a7b7f765d143881", "title": "Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "Virginia Commonwealth University", "lat": "37.54821500", "lng": "-77.45306424", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/1a5a/79b4937b89420049bc279a7b7f765d143881.pdf"}, {"id": "88ee6d0b8342852a5bd55864dc7a1c8452c10bbf", "title": "Support Vector Machines Applied to Face Recognition", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/88ee/6d0b8342852a5bd55864dc7a1c8452c10bbf.pdf"}, {"id": "59f83e94a7f52cbb728d434426f6fe85f756259c", "title": "An Improved Illumination Normalization Approach based on Wavelet Tranform for Face Recognition from Single Training Image Per Person", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/59f8/3e94a7f52cbb728d434426f6fe85f756259c.pdf"}, {"id": "5d1c4e93e32ee686234c5aae7f38025523993c8c", "title": "Towards Pose Robust Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d539.pdf"}, {"id": "53ce84598052308b86ba79d873082853022aa7e9", "title": "Optimized Method for Real-Time Face Recognition System Based on PCA and Multiclass Support Vector Machine", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf"}, {"id": "eef05b87f1a62bf658fc622427187eab4fb0f7a5", "title": "High Performance Human Face Recognition using Independent High Intensity Gabor Wavelet Responses: A Statistical Approach", "addresses": [{"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/eef0/5b87f1a62bf658fc622427187eab4fb0f7a5.pdf"}, {"id": "f5c285c3729188884f448db3cc60647f15e289d3", "title": "Sorted Index Numbers for Privacy Preserving Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/f5c2/85c3729188884f448db3cc60647f15e289d3.pdf"}, {"id": "45a3ba54fc2210cf8a4fba0cbdce9dad3cefc826", "title": "Complete Cross-Validation for Nearest Neighbor Classifiers", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/45a3/ba54fc2210cf8a4fba0cbdce9dad3cefc826.pdf"}, 
{"id": "71e942e05f73b163a7ec814a85ff4131cb48f650", "title": "The BANCA Database and Evaluation Protocol", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/8f83/e1a0c05da3a2f316b75b4a178fadf709dd68.pdf"}, {"id": "1fe0c5562c8dffecc0cadeef2c592bfa6e89b5ca", "title": "Illumination invariant face recognition based on neural network ensemble", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "North Dakota State University", "lat": "46.89715500", "lng": "-96.81827603", "type": "edu"}], "year": 2004, "pdf": "http://cs.boisestate.edu/~dxu/publications/ICTAI04.pdf"}, {"id": "58da4e59c4d259196fc6bd807bc8c36636efa4ef", "title": "Symmetrical PCA in face recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/58da/4e59c4d259196fc6bd807bc8c36636efa4ef.pdf"}, {"id": "c901524f01c7a0db3bb01afa1d5828913c84628a", "title": "Image Region Selection and Ensemble for Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2006, "pdf": "https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/jcst06.pdf"}, {"id": "221c9fff1c25368a6b72ca679c67a3d6b35e2c00", "title": "Memory-Based Face Recognition for Visitor Identification", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/5ccb/f66733438ab42fe2da66ad1d37635f4391de.pdf"}, {"id": "fc798314994bf94d1cde8d615ba4d5e61b6268b6", "title": "Face Recognition : face in video , age invariance , and facial marks", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf"}, {"id": "a3bc6020cd57ebe3a82a0b232f969bcc4e372e53", "title": "A Hybrid Feature Extraction Technique for Face Recognition", "addresses": [{"address": "University of Wollongong", "lat": "-34.40505545", "lng": "150.87834655", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a3bc/6020cd57ebe3a82a0b232f969bcc4e372e53.pdf"}, {"id": "13d591220f9fdb22d81c2438a008c80843b61fd4", "title": "Boosting Multi-gabor Subspaces for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/13d5/91220f9fdb22d81c2438a008c80843b61fd4.pdf"}, {"id": "621e8882c41cdaf03a2c4a986a6404f0272ba511", "title": "On robust biometric identity verification via sparse encoding of faces: Holistic vs local approaches", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/IJCNN.2012.6252611"}, {"id": "8780f14d04671d4f2ed50307d16062d72cc51863", "title": "Likelihood Ratio-Based Detection of Facial Features", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/8780/f14d04671d4f2ed50307d16062d72cc51863.pdf"}, {"id": 
"7a52eb0886892c04c6c80b78795d880a70796cb6", "title": "Perceptual distance normalization for appearance detection", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://www.cs.toronto.edu/~jepson/papers/ChennubhotlaJepsonICPR2004.pdf"}, {"id": "1fe121925668743762ce9f6e157081e087171f4c", "title": "Unsupervised learning of overcomplete face descriptors", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2015, "pdf": "https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf"}, {"id": "f3cb97791ded4a5c3bed717f820215a1c9648226", "title": "Multi-scale Block Weber Local Descriptor for Face Recognition", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f3cb/97791ded4a5c3bed717f820215a1c9648226.pdf"}, {"id": "d28d697b578867500632b35b1b19d3d76698f4a9", "title": "Face Recognition Using Shape and Texture", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/d28d/697b578867500632b35b1b19d3d76698f4a9.pdf"}, {"id": "5a5ae31263517355d15b7b09d74cb03e40093046", "title": "Super Resolution and Face Recognition Based People Activity Monitoring Enhancement Using Surveillance Camera", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5a5a/e31263517355d15b7b09d74cb03e40093046.pdf"}, {"id": "82524c49ea20390c711e0606e50570ac2183c281", "title": "(2D)PCA: 2-Directional 2-Dimensional PCA for Efficient Face Representation and Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/8252/4c49ea20390c711e0606e50570ac2183c281.pdf"}, {"id": "b13a882e6168afc4058fe14cc075c7e41434f43e", "title": "Recognition of Humans and Their Activities Using Video", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf"}, {"id": "ac9516a589901f1421e8ce905dd8bc5b689317ca", "title": "A Practical Framework for Executing Complex Queries over Encrypted Multimedia Data", "addresses": [{"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ac95/16a589901f1421e8ce905dd8bc5b689317ca.pdf"}, {"id": "cd0503a31a9f9040736ccfb24086dc934508cfc7", "title": "Maximizing Resource Utilization In Video Streaming Systems", "addresses": [{"address": "Wayne State University", "lat": "42.35775700", "lng": "-83.06286711", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cd05/03a31a9f9040736ccfb24086dc934508cfc7.pdf"}, {"id": "183c10b7d9ff26576e13a6639de0f7af206ed058", "title": "Face recognition based on frontal views generated from non-frontal images", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2005, "pdf": 
"http://gravis.cs.unibas.ch/publications/CVPR05_Blanz.pdf"}, {"id": "96d34c1a749e74af0050004162d9dc5132098a79", "title": "High-speed face recognition based on discrete cosine transform and RBF neural networks", "addresses": [{"address": "Nanyang Technological University, Singapore", "lat": "1.34619520", "lng": "103.68154990", "type": "edu"}], "year": 2005, "pdf": "https://doi.org/10.1109/TNN.2005.844909"}, {"id": "559645d2447004355c83737a19c9a811b45780f1", "title": "Combining view-based pose normalization and feature transform for cross-pose face recognition", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}, {"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}, {"address": "\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne", "lat": "46.51841210", "lng": "6.56846540", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139114"}, {"id": "bc866c2ced533252f29cf2111dd71a6d1724bd49", "title": "A Multi-Modal Face Recognition Method Using Complete Local Derivative Patterns and Depth Maps", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/bc86/6c2ced533252f29cf2111dd71a6d1724bd49.pdf"}, {"id": "63a584487beb7382cad8ed70020f108ded5bf076", "title": "Face Detection and Modeling for Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2002", "pdf": "https://pdfs.semanticscholar.org/2bb3/4f45b1f0ae2b602a6f25f1966cd0f84e3f5f.pdf"}, {"id": "5e6c23d2e2f92a90bd35bdbc937b2d7d95ee2d55", "title": "Fusion of Wavelet Coefficients from Visual and Thermal Face Images for Human Face Recognition - A Comparative Study", "addresses": [{"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5e6c/23d2e2f92a90bd35bdbc937b2d7d95ee2d55.pdf"}, {"id": "c03e01717b2d93f04cce9b5fd2dcfd1143bcc180", "title": "Locality-Constrained Active Appearance Model", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf"}, {"id": "4d15254f6f31356963cc70319ce416d28d8924a3", "title": "Quo vadis Face Recognition?", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}, {"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf"}, {"id": "1dad684de1ce4c013ba04eb4b1a70355b3786ecd", "title": "Computers Seeing People", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/933d/06908b782279b1127c9ba498d868b26ffe8e.pdf"}, {"id": "52909a123ba3b088a5a93d930dcd029ec2f1f24f", "title": "A Gabor-Block-Based Kernel Discriminative Common Vector Approach Using Cosine Kernels for Human Face Recognition", "addresses": 
[{"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/5d05/a0deec42a061541bbd399bc9e40d4ad3374a.pdf"}, {"id": "b374391ab793a1bb2ecde4df51be9d97c2cbf79a", "title": "Improved PCA based Face Recognition using Feature based Classifier Ensemble", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/b374/391ab793a1bb2ecde4df51be9d97c2cbf79a.pdf"}, {"id": "d10cfcf206b0991e3bc20ac28df1f61c63516f30", "title": "Smile or smirk? Automatic detection of spontaneous asymmetric smiles to understand viewer experience", "addresses": [{"address": "Affectiva, Inc.", "lat": "42.35730460", "lng": "-71.05824150", "type": "company"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776"}, {"id": "bf0836e5c10add0b13005990ba019a9c4b744b06", "title": "An enhanced independent component-based human facial expression recognition from video", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1109/TCE.2009.5373791"}, {"id": "051f03bc25ec633592aa2ff5db1d416b705eac6c", "title": "Partial face recognition: An alignment free approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf"}, {"id": "aba31184783150c723805831cde0f22fe257b835", "title": "Contribution of Non-scrambled Chroma Information in Privacy-Protected Face Images to Privacy Leakage", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}, {"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/aba3/1184783150c723805831cde0f22fe257b835.pdf"}, {"id": "7ef41e2be5116912fe8a4906b4fb89ac9dcf819d", "title": "A hybrid face recognition method using Markov random fields", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334492"}, {"id": "757e4cb981e807d83539d9982ad325331cb59b16", "title": "Demographics versus Biometric Automatic Interoperability", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}, {"address": "Sapienza University of Rome", "lat": "41.90376260", "lng": "12.51443840", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf"}, {"id": "67c08e2b8b918a61dcbd0d4c63a74b89b833d259", "title": "Multi-class texture analysis in colorectal cancer histology", "addresses": [{"address": "University of Perugia", "lat": "49.26224210", "lng": "-123.24500520", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/67c0/8e2b8b918a61dcbd0d4c63a74b89b833d259.pdf"}, {"id": "ac942c4870e55fe1d9822d62edcdb685d41cd2bf", "title": "Pose Discriminiation and Eye Detection Using Support Vector Machines (SVM)", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}, {"address": "University of Minnesota", "lat": "44.97308605", "lng": 
"-93.23708813", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/ac94/2c4870e55fe1d9822d62edcdb685d41cd2bf.pdf"}, {"id": "a632ebe6f1e7d9b2b652b0186abef8db218037f3", "title": "Subliminally and Supraliminally Acquired Long-Term Memories Jointly Bias Delayed Decisions", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a632/ebe6f1e7d9b2b652b0186abef8db218037f3.pdf"}, {"id": "027f769aed0cfcb3169ef60f182ce1decc0e99eb", "title": "Local Directional Pattern (LDP) for face recognition", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}], "year": 2010, "pdf": "http://www.ijicic.org/10-12018-1.pdf"}, {"id": "edd6ed94207ab614c71ac0591d304a708d708e7b", "title": "Reconstructive discriminant analysis: A feature extraction method induced from linear regression classification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2012", "pdf": "http://doi.org/10.1016/j.neucom.2012.02.001"}, {"id": "5dbf772b98cb944befa9cf01ec5d15da713a338b", "title": "Face modeling for recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/9d82/44d5a32ecc314860c1d673d687df28f77d84.pdf"}, {"id": "8356b642e4e9bb39bd26ea6c4b9bad21bd9b1912", "title": "Seeing People in the Dark: Face Recognition in Infrared Images", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/8356/b642e4e9bb39bd26ea6c4b9bad21bd9b1912.pdf"}, {"id": "2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7", "title": "Feature-based face representations and image reconstruction from behavioral and neural data.", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2b73/e3d541b0208ae54b3920fef4bfd9fd0c84a7.pdf"}, {"id": "1e8d0998c69caf6e9495db1d6df562f8b9e90003", "title": "Solving the Small Sample Size Problem of LDA", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/1e8d/0998c69caf6e9495db1d6df562f8b9e90003.pdf"}, {"id": "99b93f67c3b2b0a474bf5670a7dd40a6a0e849ac", "title": "NIMBLER: A Model of Visual Attention and Object Recognition With a Biologically Plausible Retina", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/99b9/3f67c3b2b0a474bf5670a7dd40a6a0e849ac.pdf"}, {"id": "9729930ab0f9cbcd07f1105bc69c540330cda50a", "title": "Compressing Fisher Vector for Robust Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2749331"}, {"id": "459eb3cfd9b52a0d416571e4bc4e75f979f4b901", "title": "Vision development of humanoid head robot SHFR-III", "addresses": [{"address": 
"Shanghai University", "lat": "31.32235655", "lng": "121.38400941", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ROBIO.2015.7418998"}, {"id": "14b2dff604f148c4e5b54aa25fbecbf7f9071205", "title": "A new preselection method for face recognition in JPEG domain based on face segmentation", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2011, "pdf": "http://www.iranprc.org/pdf/paper/2011-06.pdf"}, {"id": "ff47698be7313005d0ea0fe0cc72c13f2f4b092a", "title": "Caring or daring? Exploring the impact of facial masculinity/femininity and gender category information on first impressions", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ff47/698be7313005d0ea0fe0cc72c13f2f4b092a.pdf"}, {"id": "0c6a18b0cee01038eb1f9373c369835b236373ae", "title": "Learning warps based similarity for pose-unconstrained face recognition", "addresses": [{"address": "Chonbuk National University", "lat": "35.84658875", "lng": "127.13501330", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-4359-9"}, {"id": "54e6343f4368d9e5468c3e83b6eeb3a58a3c7555", "title": "Reconstructing Perceived and Retrieved Faces from Activity Patterns in Lateral Parietal Cortex.", "addresses": [{"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/54e6/343f4368d9e5468c3e83b6eeb3a58a3c7555.pdf"}, {"id": "00d6e5a1b347463f6aeb08a10cd912273c9d1347", "title": "Face Recognition Vendor Test 2002 : Evaluation Report", "addresses": [{"address": "DARPA", "lat": "38.88334130", "lng": "-77.10459770", "type": "mil"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/00d6/e5a1b347463f6aeb08a10cd912273c9d1347.pdf"}, {"id": "dc964b9c7242a985eb255b2410a9c45981c2f4d0", "title": "Feature Extraction by Using Dual-Generalized Discriminative Common Vectors", "addresses": [{"address": "Universitat Aut\u00f2noma de Barcelona", "lat": "41.50078110", "lng": "2.11143663", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s10851-018-0837-6"}, {"id": "8023864256a1a4a26e130a7165f3d70875c27467", "title": "LUT-Based Adaboost for Gender Classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/8023/864256a1a4a26e130a7165f3d70875c27467.pdf"}, {"id": "2a77e3221d0512aa5674cf6f9041c1ce81fc07f0", "title": "An Automatic Hybrid Segmentation Approach for Aligned Face Portrait Images", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65de/08bab21921fba39e97f0bc3585f62cb2bd5d.pdf"}, {"id": "aff92784567095ee526a705e21be4f42226bbaab", "title": "Face recognition in uncontrolled environments", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/aff9/2784567095ee526a705e21be4f42226bbaab.pdf"}, {"id": "a7d7fba176e442f60899c57b976ae6de6d013ceb", "title": "Gender differences in experiential and facial reactivity to approval and disapproval during emotional social interactions", 
"addresses": [{"address": "University of Salzburg", "lat": "47.79475945", "lng": "13.05417525", "type": "edu"}, {"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/a7d7/fba176e442f60899c57b976ae6de6d013ceb.pdf"}, {"id": "ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba", "title": "Face Recognition Using Histogram-based Features in Spatial and Frequency Domains", "addresses": [{"address": "Kogakuin University", "lat": "35.69027840", "lng": "139.69540096", "type": "edu"}, {"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ca50/b25eaad0c9146fc5a4a2cd4c472c77b970ba.pdf"}, {"id": "79dc9a1aa2ab7fa46e8024bd654a4a5776c1a6d6", "title": "Robust non-rigid 3D tracking for face recognition in real-world videos", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2011, "pdf": "http://mmlab.siat.ac.cn/sfchen-old/Publications/ICIA11-3Dtracking.pdf"}, {"id": "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "title": "AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces", "addresses": [{"address": "York University", "lat": "43.77439110", "lng": "-79.50481085", "type": "edu"}, {"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04277.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "f2ad9b43bac8c2bae9dea694f6a4e44c760e63da", "title": "A Study on Illumination Invariant Face Recognition Methods Based on Multiple Eigenspaces", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "North Dakota State University", "lat": "46.89715500", "lng": "-96.81827603", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf"}, {"id": "933d06908b782279b1127c9ba498d868b26ffe8e", "title": "Computers Seeing People", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "1999", "pdf": "https://pdfs.semanticscholar.org/933d/06908b782279b1127c9ba498d868b26ffe8e.pdf"}, {"id": "9e31e77f9543ab42474ba4e9330676e18c242e72", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "769d1a0aff0cf7842c7861d30ce654a029d6b467", "title": "Descriptor Learning Based on Fisher Separation Criterion for Texture Classification", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/769d/1a0aff0cf7842c7861d30ce654a029d6b467.pdf"}, {"id": 
"380862d22617064ffab1a3b42f0b11752d6bd785", "title": "Recognition from a Single Sample per Person with Multiple SOM Fusion", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/3808/62d22617064ffab1a3b42f0b11752d6bd785.pdf"}, {"id": "f2cc459ada3abd9d8aa82e92710676973aeff275", "title": "Object class recognition using range of multiple computer vision algorithms", "addresses": [{"address": "South East European University", "lat": "41.98676415", "lng": "20.96254516", "type": "edu"}], "year": 2011, "pdf": "http://ieeexplore.ieee.org/document/5967185/"}, {"id": "4bc55ffc2f53801267ca1767028515be6e84f551", "title": "The Decision to Engage Cognitive Control Is Driven by Expected Reward-Value: Neural and Behavioral Evidence", "addresses": [{"address": "University of British Columbia", "lat": "49.25839375", "lng": "-123.24658161", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4bc5/5ffc2f53801267ca1767028515be6e84f551.pdf"}, {"id": "0fae5d9d2764a8d6ea691b9835d497dd680bbccd", "title": "Face Recognition using Canonical Correlation Analysis", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf"}, {"id": "71ed20748c919cd261024b146992ced4c9c2157b", "title": "Learning Semantic Patterns with Discriminant Localized Binary Projections", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Microsoft Research Asia", "lat": "39.97721700", "lng": "116.33763200", "type": "company"}, {"address": "Beckman Institute", "lat": "40.11571585", "lng": "-88.22750772", "type": "edu"}], "year": 2006, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2006/01640756.pdf"}, {"id": "9103148dd87e6ff9fba28509f3b265e1873166c9", "title": "Face Analysis using 3D Morphable Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf"}, {"id": "80cef64706957c53a31b67045d208efe39205c9e", "title": "Deficits in other-race face recognition: no evidence for encoding-based effects.", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/80ce/f64706957c53a31b67045d208efe39205c9e.pdf"}, {"id": "ce0aa94c79f60c35073f434a7fd6987180f81527", "title": "Achieving Anonymity against Major Face Recognition Algorithms", "addresses": [{"address": "Ruhr-University Bochum", "lat": "51.44415765", "lng": "7.26096541", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ce0a/a94c79f60c35073f434a7fd6987180f81527.pdf"}, {"id": "eb6f15c59e6f2ffaa9a0a55d3f045c23a5a6d275", "title": "State-Trace Analysis of the Face Inversion Effect", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/eb6f/15c59e6f2ffaa9a0a55d3f045c23a5a6d275.pdf"}, {"id": "db3e78704df982b2af92282e4a74aa3b59ea3a2e", "title": "A recurrent dynamic model for correspondence-based face recognition.", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], 
"year": "2008", "pdf": "https://pdfs.semanticscholar.org/1e69/9d9e0470c5d39ff78eaf21b394a90691c513.pdf"}, {"id": "7589bded8fed54d6eb7800d24ace662b37ed0779", "title": "Face Recognition Algorithm Using Muti-direction Markov Stationary Features and Adjacent Pixel Intensity Difference Quantization Histogram", "addresses": [{"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/7589/bded8fed54d6eb7800d24ace662b37ed0779.pdf"}, {"id": "344a5802999dddd0a6d1c4d511910af2eb922231", "title": "DroneFace: An Open Dataset for Drone Research", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f0ba/552418698d1b881c6f9f02e2c84f969e66f3.pdf"}, {"id": "7c87f445a15597f603756587e0f9b8cf4d942ecc", "title": "Analysis of Sampling Techniques for Learning Binarized Statistical Image Features Using Fixations and Salience", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7c87/f445a15597f603756587e0f9b8cf4d942ecc.pdf"}, {"id": "04e06481e455c6eb838c22e8505dafc01b7d0cfa", "title": "L<inf>1</inf> regularized projection pursuit for additive model learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2008, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2008/L1.pdf"}, {"id": "841855205818d3a6d6f85ec17a22515f4f062882", "title": "Low Resolution Face Recognition in the Wild", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11529.pdf"}, {"id": "05bd6c2bc5dc6d65c48c6366788441bcfdd9db3a", "title": "Personalizing Smart Environments: Face Recognition for Human Interaction", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/05bd/6c2bc5dc6d65c48c6366788441bcfdd9db3a.pdf"}, {"id": "9902acd6ce7662c93ee2bd41c6c11a6b99ad8754", "title": "Robust Multimodal Biometric System using Markov Chain based Rank Level Fusion", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/9902/acd6ce7662c93ee2bd41c6c11a6b99ad8754.pdf"}, {"id": "6342a4c54835c1e14159495373ab18b4233d2d9b", "title": "Towards Pose-robust Face Recognition on Video", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf"}, {"id": "241416b1249d2b71b373f8dcf054110d579a2148", "title": "Biometric face recognition using multilinear projection and artificial intelligence", "addresses": [{"address": "Newcastle University", "lat": "54.98023235", "lng": "-1.61452627", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/2414/16b1249d2b71b373f8dcf054110d579a2148.pdf"}, {"id": "001d909eb3513fb6fad8fb2355971441255458c3", "title": "Minimal local reconstruction error measure based discriminant feature extraction and classification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2008, "pdf": 
"http://mplab.ucsd.edu/wordpress/wp-content/uploads/CVPR2008/Conference/data/papers/023.pdf"}, {"id": "95d567081510e8e59834febc958668015c174602", "title": "Combining Gabor features: summing vs. voting in human face recognition", "addresses": [{"address": "Wayne State University", "lat": "42.35775700", "lng": "-83.06286711", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/95d5/67081510e8e59834febc958668015c174602.pdf"}, {"id": "e984017c5849ea78e3f50e374a5539770989536d", "title": "Bilinear Discriminant Analysis for Face Recognition", "addresses": [{"address": "\u00c9cole Centrale de Lyon", "lat": "45.78359660", "lng": "4.76789480", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/e984/017c5849ea78e3f50e374a5539770989536d.pdf"}, {"id": "b313751548018e4ecd5ae2ce6b3b94fbd9cae33e", "title": "Evaluation of Face Datasets as Tools for Assessing the\u00a0Performance of Face Recognition Methods", "addresses": [{"address": "National Institutes of Health", "lat": "39.00041165", "lng": "-77.10327775", "type": "edu"}], "year": "2008", "pdf": "http://doi.org/10.1007/s11263-008-0143-7"}, {"id": "ed9d11e995baeec17c5d2847ec1a8d5449254525", "title": "Efficient Gender Classification Using a Deep LDA-Pruned Net", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf"}, {"id": "385fa8768d174a9044bc723548a7f8810a62606c", "title": "Using an holistic method based on prior information to represent global and local variations on face images", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/385f/a8768d174a9044bc723548a7f8810a62606c.pdf"}, {"id": "826f1ac8ef16abd893062fdf5058a09881aed516", "title": "Identity-Preserving Face Recovery from Portraits", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.02279.pdf"}, {"id": "1d5219687b9e63767f19cd804147c256c5a5a3bc", "title": "Patch-based locality-enhanced collaborative representation for face recognition", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/1d52/19687b9e63767f19cd804147c256c5a5a3bc.pdf"}, {"id": "c423b0a0b7232a5cd0c3f4c75164923a3f04cdcd", "title": "Kernel Discriminant Learning with Application to Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/c423/b0a0b7232a5cd0c3f4c75164923a3f04cdcd.pdf"}, {"id": "a1c1970f7c728cc96aea798d65d38df7c9ea61dc", "title": "Eye Location Using Genetic Algorithm", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/a1c1/970f7c728cc96aea798d65d38df7c9ea61dc.pdf"}, {"id": "e121bf6f18e1cb114216a521df63c55030d10fbe", "title": "Robust Facial Component Detection for Face Alignment Applications", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/e121/bf6f18e1cb114216a521df63c55030d10fbe.pdf"}, {"id": 
"bc6011807fadc2d3e6bc97bb2c2ecee5ec1b64a8", "title": "Robust Face Recognition from a Single Training Image per Person with Kernel-Based SOM-Face", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/bc60/11807fadc2d3e6bc97bb2c2ecee5ec1b64a8.pdf"}, {"id": "09ef369754fccb530e658b8331c405867c0d45a6", "title": "Comparison of Face Verification Results on the XM2VTS Database", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}, {"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}, {"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/09ef/369754fccb530e658b8331c405867c0d45a6.pdf"}, {"id": "ca2e14671f5043dab985dd18e10c5e3f51e2e8be", "title": "Face Recognition by Using Elongated Local Binary Patterns with Average Maximum Distance Gradient Magnitude", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/ca2e/14671f5043dab985dd18e10c5e3f51e2e8be.pdf"}, {"id": "edc6d96ae195897b33c07f5fa428149915b4cf6a", "title": "Face Pose Estimation System by Combining Hybrid Ica-svm Learning and 3d Modeling", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/edc6/d96ae195897b33c07f5fa428149915b4cf6a.pdf"}, {"id": "f65ff9d6d0025f198ac4f924d2f0df121bc51c67", "title": "Overlapping on Partitioned Facial Images", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/f65f/f9d6d0025f198ac4f924d2f0df121bc51c67.pdf"}, {"id": "916498961a51f56a592c3551b0acc25978571fa7", "title": "Optimal landmark detection using shape models and branch and bound", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126275"}, {"id": "4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4", "title": "Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning", "addresses": [{"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2788002"}, {"id": "e26a7e343fe109e2b52d1eeea5b02dae836f3502", "title": "Facial Expression Recognition Utilizing Local Direction-Based Robust Features and Deep Belief Network", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "University of Oslo", "lat": "59.93891665", "lng": "10.72170765", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2676238"}, {"id": "4cb8a691a15e050756640c0a35880cdd418e2b87", "title": "Class-Based Matching of Object Parts", "addresses": [{"address": "Weizmann Institute of Science", "lat": "31.90784990", "lng": "34.81334092", "type": 
"edu"}], "year": 2004, "pdf": "http://www.vision.caltech.edu/~bart/Publications/2004/BartUllmanClassBasedMatching.pdf"}, {"id": "99b8a24aacaa53fa3f8a7e48734037c7b16f1c40", "title": "A Proposal to Improve the Authentication Process in m-Health Environments", "addresses": [{"address": "Universitat Polit\u00e8cnica de Val\u00e8ncia", "lat": "39.48083760", "lng": "-0.34095220", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2752176"}, {"id": "c2fa83e8a428c03c74148d91f60468089b80c328", "title": "Optimal Mean Robust Principal Component Analysis", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/c2fa/83e8a428c03c74148d91f60468089b80c328.pdf"}, {"id": "1b3e66bef13f114943d460b4f942e941b4761ba2", "title": "Subspace Approximation of Face Recognition Algorithms: An Empirical Study", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2008, "pdf": "http://www.nist.gov/customcf/get_pdf.cfm?pub_id=890061"}, {"id": "bdc3546ceee0c2bda9debff7de9aa7d53a03fe7d", "title": "Modeling distance functions induced by face recognition algorithms", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/bdc3/546ceee0c2bda9debff7de9aa7d53a03fe7d.pdf"}, {"id": "0fbe38527279f49561c0e1c6ff4e8f733fb79bbe", "title": "Integrating Utility into Face De-identification", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/7561/b691eb5e9913e4c3cb11caf2738d58b9c896.pdf"}, {"id": "90ea3a35e946af97372c3f32a170b179fe8352aa", "title": "Discriminant Learning for Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/90ea/3a35e946af97372c3f32a170b179fe8352aa.pdf"}, {"id": "43a03cbe8b704f31046a5aba05153eb3d6de4142", "title": "Towards Robust Face Recognition from Video", "addresses": [{"address": "Oak Ridge National Laboratory", "lat": "35.93006535", "lng": "-84.31240032", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/9594/3329cd6922a869dd6d58ef01e9492879034c.pdf"}, {"id": "cdd2ba6e6436cb5950692702053195a22789d129", "title": "Face-likeness and image variability drive responses in human face-selective ventral regions.", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/976c/3b5ad438fb0cf2fb157964e8e6f07a09ad9e.pdf"}, {"id": "b910590a0eb191d03e1aedb3d55c905129e92e6b", "title": "Robust gender classification on unconstrained face images", "addresses": [{"address": "Anhui University", "lat": "31.76909325", "lng": "117.17795091", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://doi.acm.org/10.1145/2808492.2808570"}, {"id": "dc4089294cb15e071893d24bdf2baa15de5dcb0b", "title": "Feature selection for subject identification in surveillance photos [face recognition applications]", "addresses": 
[{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://www.comm.toronto.edu/~kostas/Publications2008/pub/proceed/105.pdf"}, {"id": "a80d057099a6ca872508f5d416a8cd67b788506a", "title": "A dissociation between similarity effects in episodic face recognition.", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/a80d/057099a6ca872508f5d416a8cd67b788506a.pdf"}, {"id": "998cdde7c83a50f0abac69c7c3d20f3729a65d00", "title": "Redundancy effects in the perception and memory of visual objects", "addresses": [{"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/998c/dde7c83a50f0abac69c7c3d20f3729a65d00.pdf"}, {"id": "56c273538a2dbb4cf43c39fa4725592e97ec1681", "title": "Eye Tracking to Enhance Facial Recognition Algorithms", "addresses": [{"address": "Clemson University", "lat": "34.66869155", "lng": "-82.83743476", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/56c2/73538a2dbb4cf43c39fa4725592e97ec1681.pdf"}, {"id": "c1f07ec629be1c6fe562af0e34b04c54e238dcd1", "title": "A Novel Facial Feature Localization Method Using Probabilistic-like Output", "addresses": [{"address": "University of Miami", "lat": "25.71733390", "lng": "-80.27866887", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf"}, {"id": "5173a20304ea7baa6bfe97944a5c7a69ea72530f", "title": "Best Basis Selection Method Using Learning Weights for Face Recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/5173/a20304ea7baa6bfe97944a5c7a69ea72530f.pdf"}, {"id": "83e893858d6a6b8abb07d89e9f821f90c2b074ea", "title": "Facial image retrieval based on demographic classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334677"}, {"id": "2d8a84a8e661ce3913cb6c05b18984b14ed11dac", "title": "P3: Toward Privacy-Preserving Photo Sharing", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6fd6/af3864fc5eb62e6328be79bf8174e939efcc.pdf"}, {"id": "643d11703569766bed0a994941ae5f7b3e101659", "title": "Unsupervised Training for 3D Morphable Model Regression", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06098.pdf"}, {"id": "fcfb48b19f37e531a56ae95186a214b05c0b94c7", "title": "FACE RECOGNITION WITH EIGENFACES \u2013 A DETAILED STUDY", "addresses": [{"address": "University of KwaZulu-Natal", "lat": "-29.86742190", "lng": "30.98072720", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/fcfb/48b19f37e531a56ae95186a214b05c0b94c7.pdf"}, {"id": "8c22dc1b494c4612c4ebc61b22a480666cd841d5", "title": "Towards Practical Facial Feature Detection", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, 
{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b95b/9fcccb23be8948e96f0c110aaaedc0f7334a.pdf"}, {"id": "297c4503a18a959e3a06613d5e7e026ba351b9bf", "title": "Neurolaw: Differential brain activity for black and white faces predicts damage awards in hypothetical employment discrimination cases.", "addresses": [{"address": "Yale University", "lat": "41.25713055", "lng": "-72.98966960", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/297c/4503a18a959e3a06613d5e7e026ba351b9bf.pdf"}, {"id": "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "title": "Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/SSCI.2015.37"}, {"id": "90f0e0701b755bbce89cb0e4e3f0a070d49814a0", "title": "Beyond the retina: Evidence for a face inversion effect in the environmental frame of reference", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/90f0/e0701b755bbce89cb0e4e3f0a070d49814a0.pdf"}, {"id": "ede16b198b83d04b52dc3f0dafc11fd82c5abac4", "title": "LBP edge-mapped descriptor using MGM interest points for face recognition", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343"}, {"id": "a6f93435e006328fd0a5dcb7639e771431cc2c37", "title": "Why Some Faces won't be Remembered: Brain Potentials Illuminate Successful Versus Unsuccessful Encoding for Same-Race and Other-Race Faces", "addresses": [{"address": "Northwestern University", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu"}, {"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/c161/7c3c90e4596867d94a00a3a2bb1d55c8843b.pdf"}, {"id": "e2aafdd2f508ee383a0227de9cee00246f251ebf", "title": "Face Matching Under Time Pressure and Task Demands", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/c6f0/53bc5dbdcd89cba842251feaa4bb8b91378b.pdf"}, {"id": "699be9152895977b0b272887320d543c9c7f6157", "title": "Artistic Illumination Transfer for Portraits", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/699b/e9152895977b0b272887320d543c9c7f6157.pdf"}, {"id": "651ea8b030470ab4a70efced154e77028a102713", "title": "Increasing Face Recognition Rate", "addresses": [{"address": "University of KwaZulu-Natal", "lat": "-29.86742190", "lng": "30.98072720", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/651e/a8b030470ab4a70efced154e77028a102713.pdf"}, {"id": "62647a8f8a534db2ccfd0df7d513b4f084231d10", "title": "Weighted SOM-Face: Selecting Local Features for Recognition from Individual Face Image", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2005, "pdf": 
"http://pdfs.semanticscholar.org/6264/7a8f8a534db2ccfd0df7d513b4f084231d10.pdf"}, {"id": "d31bf8f6f9404a0ab2e601e723b9a07287d0693b", "title": "Feature Space Reduction for Face Recognition with Dual Linear Discriminant Analysis", "addresses": [{"address": "Warsaw University of Technology", "lat": "52.22165395", "lng": "21.00735776", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/d31b/f8f6f9404a0ab2e601e723b9a07287d0693b.pdf"}, {"id": "35cdd4df9f039f475247bf03fdcc605e40683dce", "title": "Eye Detection and Face Recognition Using Evolutionary Computation", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/35cd/d4df9f039f475247bf03fdcc605e40683dce.pdf"}, {"id": "5c707dc74c3c39674f74dc22f6b6325af456811c", "title": "Restoring occluded regions using FW-PCA for face recognition", "addresses": [{"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": 2012, "pdf": "http://www.aoki.ecei.tohoku.ac.jp/~ito/W13_04.pdf"}, {"id": "a40476d94c5cf1f929ee9514d3761dca00dd774b", "title": "Watch List Face Surveillance Using Transductive Inference", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/a404/76d94c5cf1f929ee9514d3761dca00dd774b.pdf"}, {"id": "e4691de78d35ed7085311a466b8d02198bf714ac", "title": "The relation between race-related implicit associations and scalp-recorded neural activity evoked by faces from different races.", "addresses": [{"address": "Yale University", "lat": "41.25713055", "lng": "-72.98966960", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/e469/1de78d35ed7085311a466b8d02198bf714ac.pdf"}, {"id": "85639cefb8f8deab7017ce92717674d6178d43cc", "title": "Automatic Analysis of Spontaneous Facial Behavior: A Final Project Report", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/8563/9cefb8f8deab7017ce92717674d6178d43cc.pdf"}, {"id": "a967426ec9b761a989997d6a213d890fc34c5fe3", "title": "Relative ranking of facial attractiveness", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2013, "pdf": "http://vision.ucsd.edu/sites/default/files/043-wacv.pdf"}, {"id": "055530f7f771bb1d5f352e2758d1242408d34e4d", "title": "A Facial Expression Recognition System from Depth Video", "addresses": [{"address": "SungKyunKwan University", "lat": "37.30031270", "lng": "126.97212300", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0555/30f7f771bb1d5f352e2758d1242408d34e4d.pdf"}, {"id": "be84d76093a791bf78bed74ef1d7db54abeca878", "title": "Open World Face Recognition with Credibility and Confidence Measures", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/be84/d76093a791bf78bed74ef1d7db54abeca878.pdf"}, {"id": "ddb49e36570af09d96059b3b6f08f9124aafe24f", "title": "A Non-Iterative Approach to Reconstruct Face Templates from Match Scores", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}], "year": 2006, "pdf": 
"http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.129"}, {"id": "98fcf33916a9bb4efdc652541573b2e7ef9e7d87", "title": "Trustworthy Tricksters: Violating a Negative Social Expectation Affects Source Memory and Person Perception When Fear of Exploitation Is High", "addresses": [{"address": "Georgia Southern University", "lat": "32.42143805", "lng": "-81.78450529", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/98fc/f33916a9bb4efdc652541573b2e7ef9e7d87.pdf"}, {"id": "11fa5abb5d5d09efbf9dacae6a6ceb9b2647f877", "title": "DCTNet: A simple learning-free approach for face recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2015, "pdf": "https://arxiv.org/pdf/1507.02049v3.pdf"}, {"id": "cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab", "title": "Quaero at TRECVID 2010: Semantic Indexing", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf"}, {"id": "dbc749490275db26337c7e3201027e8cef8e371c", "title": "Multi-band Gradient Component Pattern (MGCP): A New Statistical Feature for Face Recognition", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/dbc7/49490275db26337c7e3201027e8cef8e371c.pdf"}, {"id": "65f6d0d91cdf1a77e3c5cb78c7d21f0f4f01f8b5", "title": "PhD Thesis Incremental, Robust, and Efficient Linear Discriminant Analysis Learning", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/65f6/d0d91cdf1a77e3c5cb78c7d21f0f4f01f8b5.pdf"}, {"id": "0e9ea74cf7106057efdb63f275ca6bb838168b0c", "title": "Progressive Principal Component Analysis", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/0e9e/a74cf7106057efdb63f275ca6bb838168b0c.pdf"}, {"id": "d4d2014f05e17869b72f180fd0065358c722ac65", "title": "UNIVERSITY OF CALGARY A MULTIMODAL BIOMETRIC SYSTEM BASED ON RANK LEVEL FUSION by MD. 
MARUF MONWAR A THESIS SUBMITTED TO THE FACULTY OF GRADUATE STUDIES IN PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/d4d2/014f05e17869b72f180fd0065358c722ac65.pdf"}, {"id": "fdd7c9f3838b8d868911afaafa08beffb79b5228", "title": "An efficient mechanism for compensating vague pattern identification in support of a multi-criteria recommendation system", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}, {"address": "National Cheng Kung University", "lat": "22.99919160", "lng": "120.21625134", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/fdd7/c9f3838b8d868911afaafa08beffb79b5228.pdf"}, {"id": "d3d5d86afec84c0713ec868cf5ed41661fc96edc", "title": "A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}, {"address": "Sabanci University", "lat": "40.89271590", "lng": "29.37863323", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1606.02894.pdf"}, {"id": "20100dbeb2dfebc7595d79755d737b21e75f39a6", "title": "Cluster Indicator Decomposition for Efficient Matrix Factorization", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/2010/0dbeb2dfebc7595d79755d737b21e75f39a6.pdf"}, {"id": "3ca9453d3c023bb81cce72ff2d633fc5075e1df6", "title": "Generic vs. Person Specific Active Appearance Models", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/e36f/5fab8758194fcad043e23288330657fe7742.pdf"}, {"id": "d082f35534932dfa1b034499fc603f299645862d", "title": "TAMING WILD FACES: WEB-SCALE, OPEN-UNIVERSE FACE IDENTIFICATION IN STILL AND VIDEO IMAGERY by ENRIQUE", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/d082/f35534932dfa1b034499fc603f299645862d.pdf"}, {"id": "649b47e02b82afeccc858f1f3dcec98379bfbbbd", "title": "Face Alignment Under Various Poses and Expressions", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/649b/47e02b82afeccc858f1f3dcec98379bfbbbd.pdf"}, {"id": "7264c2a8900c2ab41575578eb2d50557b2829f84", "title": "Silhouetted face profiles: a new methodology for face perception research.", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7264/c2a8900c2ab41575578eb2d50557b2829f84.pdf"}, {"id": "9887ab220254859ffc7354d5189083a87c9bca6e", "title": "Generic Image Classification Approaches Excel on Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf"}, {"id": 
"838ed2aae603dec5851ebf5e4bc64b54db7f34be", "title": "Real-Time Ensemble Based Face Recognition System for Humanoid Robots", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/838e/d2aae603dec5851ebf5e4bc64b54db7f34be.pdf"}, {"id": "6fe83b5fdeeb6d92f24af3aed6a34c5bf9ce8845", "title": "Face Recognition Based on Local Directional Pattern Variance (LDPv)", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/6fe8/3b5fdeeb6d92f24af3aed6a34c5bf9ce8845.pdf"}, {"id": "6e177341d4412f9c9a639e33e6096344ef930202", "title": "A Gender Recognition System from Facial Image", "addresses": [{"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}, {"address": "University of Dhaka", "lat": "23.73169570", "lng": "90.39652750", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf"}, {"id": "327eab70296d39511d61e91c6839446d59f5e119", "title": "Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}, {"address": "University of Hawaii", "lat": "21.29827950", "lng": "-157.81869230", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}, {"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}, {"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}, {"address": "Utah State University", "lat": "41.74115040", "lng": "-111.81223090", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf"}, {"id": "b29f348e8675f75ff160ec65ebeeb3f3979b65d8", "title": "An objective and subjective evaluation of content-based privacy protection of face images in video surveillance systems using JPEG XR", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}, {"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/b29f/348e8675f75ff160ec65ebeeb3f3979b65d8.pdf"}, {"id": "1c2724243b27a18a2302f12dea79d9a1d4460e35", "title": "Fisher+Kernel criterion for discriminant analysis", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2005, "pdf": "http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf"}, {"id": "d16f37a15f6385a6a189b06833745da5d524f69b", "title": "Hebb repetition effects for non-verbal visual sequences: determinants of sequence acquisition.", "addresses": [{"address": "Bournemouth University", "lat": "50.74223495", "lng": "-1.89433739", "type": "edu"}], "year": "2017", "pdf": 
"https://pdfs.semanticscholar.org/d16f/37a15f6385a6a189b06833745da5d524f69b.pdf"}, {"id": "13791aa7c1047724c4046eee94e66a506b211eb9", "title": "Real-time Gender Classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/1379/1aa7c1047724c4046eee94e66a506b211eb9.pdf"}, {"id": "fa72e39971855dff6beb8174b5fa654e0ab7d324", "title": "A depth video-based facial expression recognition system using radon transform, generalized discriminant analysis, and hidden Markov model", "addresses": [{"address": "SungKyunKwan University", "lat": "37.30031270", "lng": "126.97212300", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1007/s11042-013-1793-1"}, {"id": "3843b8c4143e9f1e50c61eb462376e65861bbf24", "title": "Color Image Processing Using Reduced Biquaternions with Application to Face Recognition in a PCA Framework", "addresses": [{"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.359"}, {"id": "0cc3c62f762d64cffcab4ac7fea3896cb22a3df9", "title": "Preserving Privacy by De-identifying Facial Images", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/d30f/cc0e4c2c78cc5ff7bbd1227d3952d366a479.pdf"}, {"id": "2cae2ca6221fbfa9655e41ac52e54631ada7ad2c", "title": "Electoral College and Direct Popular Vote for Multi-Candidate Election", "addresses": [{"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/ffd6/14925a326efcb27ef52accd5638a912b4792.pdf"}, {"id": "328bfd1d0229bc4973277f893abd1eb288159fc9", "title": "A review of the literature on the aging adult skull and face: implications for forensic science research and applications.", "addresses": [{"address": "University of North Carolina at Wilmington", "lat": "34.22498270", "lng": "-77.86907744", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/328b/fd1d0229bc4973277f893abd1eb288159fc9.pdf"}, {"id": "18b4e9e51ee14c9d816358fbe1af29f0771b7916", "title": "Intelligent environments and active camera networks", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/18b4/e9e51ee14c9d816358fbe1af29f0771b7916.pdf"}, {"id": "0dde6981047067692793b71a2f7ad6a8708741d8", "title": "MODELING PHYSICAL PERSONALITIES FOR VIRTUAL AGENTS BY MODELING TRAIT IMPRESSIONS OF THE FACE: A NEURAL NETWORK ANALYSIS by SHERYL BRAHNAM", "addresses": [{"address": "City University of New York", "lat": "40.87228250", "lng": "-73.89489171", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/0dde/6981047067692793b71a2f7ad6a8708741d8.pdf"}, {"id": "20675281008211641d28ce0f2b6946537a8535c4", "title": "Multi-resolution Histograms of Local Variation Patterns (MHLVP) for Robust Face Recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/2067/5281008211641d28ce0f2b6946537a8535c4.pdf"}, {"id": 
"c22df6df55f5c6539e1a4d2e2d50dbaab34007a7", "title": "Compact Binary Patterns (CBP) with Multiple Patch Classifiers for Fast and Accurate Face Recognition", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/c22d/f6df55f5c6539e1a4d2e2d50dbaab34007a7.pdf"}, {"id": "2e6e335e591da1e8899ff53f9a7ddb4c63520104", "title": "Is All Face Processing Holistic? The View from UCSD", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Iowa", "lat": "41.66590000", "lng": "-91.57310307", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf"}, {"id": "9c1b132243e0dcacde1717ce1cfe730a74bd8cbc", "title": "Hippocampus Is Place of Interaction between Unconscious and Conscious Memories", "addresses": [{"address": "University of Geneva", "lat": "42.57054745", "lng": "-88.55578627", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9c1b/132243e0dcacde1717ce1cfe730a74bd8cbc.pdf"}, {"id": "4fb9f05dc03eb4983d8f9a815745bb47970f1b93", "title": "On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f4ee/4f7ac7585f7ea0db3b27c5ad016dbfb0feac.pdf"}, {"id": "b9df25cc4be2f703b059da93823bad6e8e8c0659", "title": "Local Gabor Binary Pattern Whitened PCA: A Novel Approach for Face Recognition from Single Image Per Person", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b9df/25cc4be2f703b059da93823bad6e8e8c0659.pdf"}, {"id": "57ba4b6de23a6fc9d45ff052ed2563e5de00b968", "title": "An efficient deep neural networks training framework for robust face recognition", "addresses": [{"address": "Xiamen University", "lat": "24.43994190", "lng": "118.09301781", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296993"}, {"id": "90bd16caa44086db6f0e4bbc1dde7063cb71b7b8", "title": "Structured Doubly Stochastic Matrix for Graph Based Clustering: Structured Doubly Stochastic Matrix", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2016, "pdf": "http://www.kdd.org/kdd2016/papers/files/rfp1162-wangA.pdf"}, {"id": "15d1582c8b65dbab5ca027467718a2c286ddce7a", "title": "On robust face recognition via sparse coding: the good, the bad and the ugly", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/15d1/582c8b65dbab5ca027467718a2c286ddce7a.pdf"}, {"id": "e19a4dadf60848309c8fd7445d97918da654df76", "title": "JPEG Compressed Domain Face Recognition : Different Stages and Different 
Features", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/e19a/4dadf60848309c8fd7445d97918da654df76.pdf"}, {"id": "d1633dc3706580c8b9d98c4c0dfa9f9a29360ca3", "title": "Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1712.01619.pdf"}, {"id": "e104e213faa97d9a9c8b8e1f15b7431c601cb250", "title": "Modeling of facial aging and kinship: A survey", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.04636.pdf"}, {"id": "da6696345d0d4ff6328c1c5916b0ca870d4cc6cf", "title": "Robust Contrast-Invariant EigenDetection", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/da66/96345d0d4ff6328c1c5916b0ca870d4cc6cf.pdf"}, {"id": "3b3550680136aa2fe3bd57c9faa3bfa0dfb3e748", "title": "Forensic Face Recognition: a Survey", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/3b35/50680136aa2fe3bd57c9faa3bfa0dfb3e748.pdf"}, {"id": "4ba3f9792954ee3ba894e1e330cd77da4668fa22", "title": "Nearest Neighbor Discriminant Analysis", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/4ba3/f9792954ee3ba894e1e330cd77da4668fa22.pdf"}, {"id": "472ba8dd4ec72b34e85e733bccebb115811fd726", "title": "Cosine Similarity Metric Learning for Face Verification", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf"}, {"id": "ba9e967208976f24a09730af94086e7ae0417067", "title": "An Open Source Framework for Standardized Comparisons of Face Recognition Algorithms", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f369/03d22a463876b895bbe37b5f9ad235a38edd.pdf"}, {"id": "4d527974512083712c9adf26a923b44d7e426b44", "title": "Impact of Image Quality on Performance: Comparison of Young and Elderly Fingerprints", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/4d52/7974512083712c9adf26a923b44d7e426b44.pdf"}, {"id": "e96ce25d11296fce4e2ecc2da03bd207dc118724", "title": "Classification of face images using local iterated function systems", "addresses": [{"address": "Deakin University", "lat": "-38.19928505", "lng": "144.30365229", "type": "edu"}], "year": 2007, "pdf": "https://doi.org/10.1007/s00138-007-0095-x"}, {"id": "fcd2fb1ada96218dcc2547efa040e76416cc7066", "title": "Perceptual data mining: bootstrapping visual intelligence from tracking behavior", "addresses": [{"address": "Northwestern University", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu"}, {"address": "MIT", "lat": "42.35839610", 
"lng": "-71.09567788", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/fcd2/fb1ada96218dcc2547efa040e76416cc7066.pdf"}, {"id": "97930609f1a5066fd437ed8a4e57abbfb1ae4b12", "title": "Best Practices in Testing and Reporting Performance of Biometric Devices", "addresses": [{"address": "San Jose State University", "lat": "37.33519080", "lng": "-121.88126008", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/bef4/03c136beaa6fd43fc3184d4666512daaf9e5.pdf"}, {"id": "985dc9b8b003483f6df363a8ce07dd8c89ced903", "title": "3D Morphable Face Model, a Unified Approach for Analysis and Synthesis of Images", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/985d/c9b8b003483f6df363a8ce07dd8c89ced903.pdf"}, {"id": "1057137d8ebbbfc4e816d74edd7ab04f61a893f8", "title": "Craniofacial Aging", "addresses": [{"address": "University of North Carolina Wilmington", "lat": "34.23755810", "lng": "-77.92701290", "type": "edu"}, {"address": "Virginia Commonwealth University", "lat": "37.54821500", "lng": "-77.45306424", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/1057/137d8ebbbfc4e816d74edd7ab04f61a893f8.pdf"}, {"id": "005d818ff8517669d62ba7b536e76b56698fa135", "title": "Neural Network-Based Face Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 1996, "pdf": "http://pdfs.semanticscholar.org/4d7e/e94f164cce28a8bfef4417e9a99265b02b54.pdf"}, {"id": "0c85d1b384bb6e2d5d6e4db5461a7101ceed6808", "title": "Engineering Privacy in Public: Confounding Face Recognition", "addresses": [{"address": "University of Pennsylvania", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/0ff8/d39a962ed902e1c995815ade265ea903d218.pdf"}, {"id": "9107543d9a9d915c92fe4139932c5d818cfc187d", "title": "Investigation of New Techniques for Face Detection", "addresses": [{"address": "Virginia Polytechnic Institute and State University", "lat": "37.21872455", "lng": "-80.42542519", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/9107/543d9a9d915c92fe4139932c5d818cfc187d.pdf"}, {"id": "b3e856729f89b082b4108561479ff09394bb6553", "title": "Pose Robust Video - Based Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/b3e8/56729f89b082b4108561479ff09394bb6553.pdf"}, {"id": "d1836e137787fadb28d3418e029534765bcf1dae", "title": "Analysis , Synthesis and Recognition of Human Faces with Pose Variations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/d183/6e137787fadb28d3418e029534765bcf1dae.pdf"}, {"id": "2fd1c99edbb3d22cec4adc9ba9319cfc2360e903", "title": "Rotation Invariant Neural Network-Based Face Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/98c8/ca05ed5baff5b217c571ab5c5a0ee0706e27.pdf"}, {"id": "b6145d3268032da70edc9cfececa1f9ffa4e3f11", "title": "Face Recognition Using the Discrete Cosine Transform", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", 
"type": "edu"}], "year": 2001, "pdf": "http://cnl.salk.edu/~zhafed/papers/fr_IJCV_2001.pdf"}, {"id": "01b73cfd803f0bdeab8bbfc26cd1ed110c762c91", "title": "Facial Recognition Technology A Survey of Policy and Implementation Issues", "addresses": [{"address": "Lancaster University", "lat": "54.00975365", "lng": "-2.78757491", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/01b7/3cfd803f0bdeab8bbfc26cd1ed110c762c91.pdf"}, {"id": "c9579768d142a7020d095090183805c98a2f78e5", "title": "The Bochum/USC Face Recognition System and How it Fared in the FERET Phase III Test", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/e30d/b2331efa48f6c60330d492210ed6395774f2.pdf"}, {"id": "42fe5666599f35b805657e829e8f9093ee95b908", "title": "Pose-Tolerant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/42fe/5666599f35b805657e829e8f9093ee95b908.pdf"}, {"id": "29c7dfbbba7a74e9aafb6a6919629b0a7f576530", "title": "Automatic Facial Expression Analysis and Emotional Classification", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/29c7/dfbbba7a74e9aafb6a6919629b0a7f576530.pdf"}, {"id": "f6a65be9a3790e8fd3b5116450a47a8e48a54d63", "title": "Parametric Piecewise Linear Subspace Method for Processing Facial Images with 3D Pose Variations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/f6a6/5be9a3790e8fd3b5116450a47a8e48a54d63.pdf"}, {"id": "07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1", "title": "Large scale unconstrained open set face database", "addresses": [{"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2013, "pdf": "http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf"}, {"id": "f7f19ac1c4e38c104045c306f5ddac6329193d8c", "title": "Measuring External Face Appearance for Face Classification", "addresses": [{"address": "Universitat Aut\u00f2noma de Barcelona", "lat": "41.50078110", "lng": "2.11143663", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/f7f1/9ac1c4e38c104045c306f5ddac6329193d8c.pdf"}, {"id": "57bd46b16644be40b2e0dc595c1aaa6abbadba89", "title": "Overview of Work in Empirical Evaluation of Computer Vision Algorithms", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/c3f7/6fe32a0ca448f1ce7004198827df48bf827b.pdf"}, {"id": "fc83a26beb38b17af737c4ff34141d0deea3a4e1", "title": "The Challenges of the Environment and the Human / Biometric Device Interaction on Biometric System Performance", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/fc83/a26beb38b17af737c4ff34141d0deea3a4e1.pdf"}, {"id": "1e3739716e163fce6fded71eda078a18334aa83b", "title": "The HFB Face Database for Heterogeneous Face Biometrics research", 
"addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1109/CVPRW.2009.5204149"}, {"id": "a0d6390dd28d802152f207940c7716fe5fae8760", "title": "Bayesian Face Revisited: A Joint Formulation", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf"}, {"id": "b19ca50a9e2415072a97482005fe0b77a8a495ce", "title": "Hierarchical Direct Appearance Model for Elastic Labeled Graph Localization", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/b19c/a50a9e2415072a97482005fe0b77a8a495ce.pdf"}, {"id": "5bf954ca82b42865c49eef4b064278b82f3b38de", "title": "Re-engaging with the past: recapitulation of encoding operations during episodic retrieval", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}, {"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/80b0/045eed3a1fc9ab502963f6fb3e6f70a2f638.pdf"}, {"id": "ca458f189c1167e42d3a5aaf81efc92a4c008976", "title": "Double Shrinking Sparse Dimension Reduction", "addresses": [{"address": "University of Technology", "lat": "-33.88405040", "lng": "151.19922540", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2012.2202678"}, {"id": "b20a8fc556aed9ab798fcf31e4f971dbc67a9edf", "title": "An Adept Segmentation Algorithm and Its Application to the Extraction of Local Regions Containing Fiducial Points", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/b20a/8fc556aed9ab798fcf31e4f971dbc67a9edf.pdf"}, {"id": "80290f2a38741e20a38de7c00d80353604343ef8", "title": "Eigenfeature Optimization for Face Detection", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/8029/0f2a38741e20a38de7c00d80353604343ef8.pdf"}, {"id": "4a24d41aef0041ef82916d2316eea86f6c45c47f", "title": "Impact of Full Rank Principal Component Analysis on Classification Algorithms for Face Recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4a24/d41aef0041ef82916d2316eea86f6c45c47f.pdf"}, {"id": "7c7fb5c70bdabe8442c46c791fb2db00c490410b", "title": "Human Face Recognition using Gabor based Kernel Entropy Component Analysis", "addresses": [{"address": "Indian Statistical Institute, Kolkata", "lat": "22.64815210", "lng": "88.37681700", "type": "edu"}, {"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/7c7f/b5c70bdabe8442c46c791fb2db00c490410b.pdf"}, {"id": "19e62a56b6772bbd37dfc6b8f948e260dbb474f5", "title": "Cross-Domain Metric Learning Based on Information 
Theory", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf"}, {"id": "c1cf5dda56c72b65e86f3a678f76644f22212748", "title": "Face Hallucination via Semi-kernel Partial Least Squares", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/c1cf/5dda56c72b65e86f3a678f76644f22212748.pdf"}, {"id": "e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef", "title": "Addressing the illumination challenge in two-dimensional face recognition: a survey", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf"}, {"id": "e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa", "title": "Weakly Supervised Learning for Unconstrained Face Processing", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf"}, {"id": "bc9003ad368cb79d8a8ac2ad025718da5ea36bc4", "title": "Facial expression recognition with a three-dimensional face model", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf"}, {"id": "d6b0a1f6dfb995436b45045b56e966d8e57b0990", "title": "Gait analysis and recognition for automated visual surveillance", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/d6b0/a1f6dfb995436b45045b56e966d8e57b0990.pdf"}, {"id": "02ae77f4c289426f18e83ce6e295d39538fb0fcc", "title": "Dependency Modeling for Information Fusion with Applications in Visual Recognition", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/02ae/77f4c289426f18e83ce6e295d39538fb0fcc.pdf"}, {"id": "078549cb5474b024d203f96954646cacef219682", "title": "Single Image Face Recognition based on Gabor, Sobel and Local Ternary Pattern", "addresses": [{"address": "Jahangirnagar University", "lat": "23.88331200", "lng": "90.26939210", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1b42/0d5cf66e60b540ecdb352a287c85d9d7e2a4.pdf"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "1320c42b348c5342c2ad6a60e3ded3ff0bd56f7f", "title": "A Viewpoint Invariant, Sparsely Registered, Patch Based, Face\u00a0Verifier", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2007, "pdf": "https://doi.org/10.1007/s11263-007-0119-z"}, {"id": 
"17cf6195fd2dfa42670dc7ada476e67b381b8f69", "title": "Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments", "addresses": [{"address": "Chung-Ang University", "lat": "37.50882000", "lng": "126.96190000", "type": "edu"}, {"address": "Korea Electronics Technology Institute", "lat": "37.40391700", "lng": "127.15978600", "type": "edu"}, {"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf"}, {"id": "b5930275813a7e7a1510035a58dd7ba7612943bc", "title": "Face Recognition Using L-Fisherfaces", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}, {"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}, {"address": "Shandong University of Science and Technology", "lat": "36.00146435", "lng": "120.11624057", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf"}, {"id": "281cc188bf7588681cdf8e325b0ed13ac927e2e6", "title": "A Multi-Modal Person Recognition System for Social Robots", "addresses": [{"address": "Fraser University", "lat": "44.96898360", "lng": "-93.20941629", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/281c/c188bf7588681cdf8e325b0ed13ac927e2e6.pdf"}, {"id": "89ac06ccbc410224f4d05d5ae8fa46c4fe3cbe0f", "title": "Video Based Face Verification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/947e/53c1d9035df85a3bc1b852928acbe889daf4.pdf"}, {"id": "72b4b8f4a9f25cac5686231b44a2220945fd2ff6", "title": "Face Verification Using Modeled Eigenspectrum", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/72b4/b8f4a9f25cac5686231b44a2220945fd2ff6.pdf"}, {"id": "95289007f2f336e6636cf8f920225b8d47c6e94f", "title": "Automatic Training Image Acquisition and Effective Feature Selection From Community-Contributed Photos for Facial Attribute Detection", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "Academia Sinica, Taiwan", "lat": "25.04117270", "lng": "121.61465180", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796"}, {"id": "749ebfa344b6d27de898d619cea0b28ad3894ff2", "title": "Predicting Biometric Authentication System Performance Across Different Application Conditions: A Bootstrap Enhanced Parametric Approach", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/749e/bfa344b6d27de898d619cea0b28ad3894ff2.pdf"}, {"id": "e3bb87e858bc752436c7a8da3fca68b2dacbf3e8", "title": "On the Evaluation of Methods for the Recovery of Plant Root Systems from X-ray Computed Tomography Images", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e3bb/87e858bc752436c7a8da3fca68b2dacbf3e8.pdf"}, {"id": "a94cae786d515d3450d48267e12ca954aab791c4", "title": "YawDD: a yawning detection dataset", "addresses": [{"address": 
"University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf"}, {"id": "8a3bb63925ac2cdf7f9ecf43f71d65e210416e17", "title": "ShearFace: Efficient Extraction of Anisotropic Features for Face Recognition", "addresses": [{"address": "University of Sfax, Tunisia", "lat": "34.73610660", "lng": "10.74272750", "type": "edu"}], "year": 2014, "pdf": "https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf"}, {"id": "31dd6bafd6e7c6095eb8d0591abac3b0106a75e3", "title": "Face Recognition In Unconstrained Environment", "addresses": [{"address": "Khon Kaen University", "lat": "16.46007565", "lng": "102.81211798", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336"}, {"id": "beea33ccd9423d48d6cfb928469bbe7841e63e73", "title": "DIARETDB1 diabetic retinopathy database and evaluation protocol", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/beea/33ccd9423d48d6cfb928469bbe7841e63e73.pdf"}, {"id": "4cf0c6d3da8e20d6f184a4eaa6865d61680982b8", "title": "Face recognition based on 3D mesh model", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/4cf0/c6d3da8e20d6f184a4eaa6865d61680982b8.pdf"}, {"id": "ba6082291b018b14f8da4f96afc631918bad3a1b", "title": "Calibration , Recognition , and Shape from Silhouettes of Stones", "addresses": [{"address": "University of Cape Town", "lat": "-33.95828745", "lng": "18.45997349", "type": "edu"}], "year": "2007", "pdf": "https://pdfs.semanticscholar.org/3f5b/0cf2ed392045026ea0d1d67145d0400e516f.pdf"}, {"id": "b1e218046a28d10ec0be3272809608dea378eddc", "title": "Overview of the Multiple Biometrics Grand Challenge", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/12c5/66e2eee7bbaf45b894e7282f87f00f1db20a.pdf"}, {"id": "15122ef718265beb4cb1a74e5d1f41c5edcb4ba5", "title": "On the Euclidean distance of images", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2005.165"}, {"id": "9d6e60d49e92361f8f558013065dfa67043dd337", "title": "Applications of Computational Geometry and Computer Vision", "addresses": [{"address": "Central Washington University", "lat": "47.00646895", "lng": "-120.53673040", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/9d6e/60d49e92361f8f558013065dfa67043dd337.pdf"}, {"id": "121839d3254820b7017b07ef47acc89b975286a9", "title": "Feature Extraction for Incomplete Data via Low-rank Tucker Decomposition", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}, {"address": "Guangdong University of Technology", "lat": "23.13538360", "lng": "113.29470496", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/92a2/5b281f1637d125cefefcbfc382f48f456f4c.pdf"}, {"id": "88ed558bff3600f5354963d1abe762309f66111e", "title": "Real-World and Rapid Face Recognition Toward Pose and Expression Variations via Feature Library Matrix", "addresses": [{"address": "Amirkabir 
University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Semnan University", "lat": "35.60374440", "lng": "53.43445877", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TIFS.2015.2393553"}, {"id": "016a8ed8f6ba49bc669dbd44de4ff31a79963078", "title": "Face relighting for face recognition under generic illumination", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2004, "pdf": "https://doi.org/10.1109/ICASSP.2004.1327215"}, {"id": "44f48a4b1ef94a9104d063e53bf88a69ff0f55f3", "title": "Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf"}, {"id": "2fd007088a75916d0bf50c493d94f950bf55c5e6", "title": "Projective Representation Learning for Discriminative Face Recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/978-981-10-7302-1_1"}, {"id": "0cb613bf519b90d08d2f12623b41f02c638cea63", "title": "Face annotation for personal photos using context-assisted face recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2008, "pdf": "http://koasas.kaist.ac.kr/bitstream/10203/22675/1/Face%20Annotation%20for%20Personal%20Photos%20Using%20Context%20Assisted%20Face%20Recognition.pdf"}, {"id": "30b6811205b42e92d7a82c606d4521319764250b", "title": "Low cost illumination invariant face recognition by down-up sampling self quotient image", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/APSIPA.2013.6694367"}, {"id": "ff69da3510f5ffed224069faf62036e1aa9b6d26", "title": "Extended Set of Local Binary Patterns for Rapid Object Detection", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/a256/3501ffd5a840fa4df0f3911a82e117df2f7f.pdf"}, {"id": "c207fd762728f3da4cddcfcf8bf19669809ab284", "title": "Face Alignment Using Boosting and Evolutionary Search", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf"}, {"id": "23a450a075d752f1ec2b1e5e225de13d3bc37636", "title": "Subspace Learning in Krein Spaces: Complete Kernel Fisher Discriminant Analysis with Indefinite Kernels", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/23a4/50a075d752f1ec2b1e5e225de13d3bc37636.pdf"}, {"id": "521c2e9892eb22f65ba5b0d4c8d2f4c096d9fdf3", "title": "Model-Based Face De-Identification", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2006, "pdf": 
"http://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2006_2/gross_ralph_2006_2.pdf"}, {"id": "91e507d2d8375bf474f6ffa87788aa3e742333ce", "title": "Robust Face Recognition Using Probabilistic Facial Trait Code", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf"}, {"id": "744b794f0047b008c517752fc9bb1100e5f120cc", "title": "Multiple-exemplar discriminant analysis for face recognition", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333736"}, {"id": "44d93039eec244083ac7c46577b9446b3a071f3e", "title": "Empirical comparisons of several preprocessing methods for illumination insensitive face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2005", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571"}, {"id": "2d435b7510eeda648dc34d5b8ac921499d525218", "title": "Improving Variance Estimation in Biometric Systems", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383395"}, {"id": "ab7bcbaa9e77d35634302b021d47e7889628a88d", "title": "FACESKETCHID: A SYSTEM FOR FACIAL SKETCH TO MUGSHOT MATCHING by Scott", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ab7b/cbaa9e77d35634302b021d47e7889628a88d.pdf"}, {"id": "92017bf2df5f6532d39c624ea209f37bb6728097", "title": "Attention Driven Face Recognition, Learning from Human Vision System", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/9201/7bf2df5f6532d39c624ea209f37bb6728097.pdf"}, {"id": "841bf196ee0086c805bd5d1d0bddfadc87e424ec", "title": "Locally Kernel-based Nonlinear Regression for Face Recognition", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf"}, {"id": "40055c342c19ab492df04dae2e186cd0d6b5dc5e", "title": "Robust representations for face recognition: the power of averages.", "addresses": [{"address": "University of Glasgow", "lat": "55.87231535", "lng": "-4.28921784", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/a406/ad4bdf50f696191e7472b7a41d9d57ff046c.pdf"}, {"id": "c444c4dab97dd6d6696f56c1cacda051dde60448", "title": "Multiview Face Detection and Registration Requiring Minimal Manual Intervention", "addresses": [{"address": "A*STAR, Singapore", "lat": "1.29889260", "lng": "103.78731070", "type": "edu"}, {"address": 
"Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37"}, {"id": "235bebe7d0db37e6727dfa1246663be34027d96b", "title": "General Type-2 fuzzy edge detectors applied to face recognition systems", "addresses": [{"address": "Tijuana Institute of Technology, Mexico", "lat": "32.87853490", "lng": "-117.23583070", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/NAFIPS.2016.7851625"}, {"id": "65b1760d9b1541241c6c0222cc4ee9df078b593a", "title": "Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf"}, {"id": "27b9e75bcaf9e12127f7181bcb7f1fcb105462c4", "title": "Local frequency descriptor for low-resolution face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2011, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/LEI-LFD-FG-11.pdf"}, {"id": "aecd24f4a41eb6942375b9c03adcb7e137250b3f", "title": "Tensor Sparse Coding for Region Covariances", "addresses": [{"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/aecd/24f4a41eb6942375b9c03adcb7e137250b3f.pdf"}, {"id": "179f446aa297d6fe5c864b605286b946f85bb4ee", "title": "Fusion of static and dynamic body biometrics for gait recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2003, "pdf": "http://lear.inrialpes.fr/people/triggs/events/iccv03/cdrom/iccv03/1449_wang.pdf"}, {"id": "3d741315108b95cdb56d312648f5ad1c002c9718", "title": "Image-based face recognition under illumination and pose variations.", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3d74/1315108b95cdb56d312648f5ad1c002c9718.pdf"}, {"id": "8ca3cfb9595ebc5b36a25659f6bbf362f0b14ae3", "title": "Spectral Clustering Based Null Space Linear Discriminant Analysis (SNLDA)", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/8ca3/cfb9595ebc5b36a25659f6bbf362f0b14ae3.pdf"}, {"id": "908a899c716d63bd327dee4a72061db5674bdc92", "title": "Experiments with Face Recognition Using a Novel Approach Based on CVQ Technique", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/908a/899c716d63bd327dee4a72061db5674bdc92.pdf"}, {"id": "9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73", "title": "3D Shape and Pose Estimation of Face Images Using the Nonlinear Least-Squares Model", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2010, "pdf": 
"http://pdfs.semanticscholar.org/9f53/83ec6ee5e810679e4a7e0a3f153f0ed3bb73.pdf"}, {"id": "07c90e85ac0f74b977babe245dea0f0abcf177e3", "title": "An Image Preprocessing Algorithm for Illumination Invariant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf"}, {"id": "0faab61c742609be74463d30b0eb1118dba4a4f3", "title": "Null Space Approach of Fisher Discriminant Analysis for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/0faa/b61c742609be74463d30b0eb1118dba4a4f3.pdf"}, {"id": "1319dbeaa28f8a9b19e03a7631e96393e08a07fa", "title": "Gender Recognition Using Fusion of Local and Global Facial Features", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1319/dbeaa28f8a9b19e03a7631e96393e08a07fa.pdf"}, {"id": "48381007b85e8a3b74e5401b2dfc1a5dfc897622", "title": "Sparse Representation and Dictionary Learning for Biometrics and Object Tracking", "addresses": [{"address": "University of Miami", "lat": "25.71733390", "lng": "-80.27866887", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4838/1007b85e8a3b74e5401b2dfc1a5dfc897622.pdf"}, {"id": "852e7df8794b15413f1d71628939c3cc28580b12", "title": "Boosted Audio-Visual HMM for Speech Reading", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/852e/7df8794b15413f1d71628939c3cc28580b12.pdf"}, {"id": "c5c1575565e04cd0afc57d7ac7f7a154c573b38f", "title": "Face Refinement through a Gradient Descent Alignment Approach", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/010a/f49ddb10c51b7913c2533910dd28ca39411c.pdf"}, {"id": "da7bbfa905d88834f8929cb69f41a1b683639f4b", "title": "Discriminant analysis with Gabor phase for robust face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "HoHai University", "lat": "32.05765485", "lng": "118.75500040", "type": "edu"}, {"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}], "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752"}, {"id": "546cbbb897022096511f6a71259e3b99c558224d", "title": "PCA vs. 
ICA: A Comparison on the FERET Data Set", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/8a17/e16de6b932ec42e269621e29d99e46591fef.pdf"}, {"id": "6e7afe55d363adf80330116968163c7e9500f53b", "title": "SVD-based projection for face recognition", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": 2007, "pdf": "http://www.cs.nthu.edu.tw/~cchen/Research/2007EitFace.pdf"}, {"id": "2a9946fb626a58d376fb1491ca8bf8fb4f68dcf9", "title": "Enlarge the Training Set Based on Inter-Class Relationship for Face Recognition from One Image per Person", "addresses": [{"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/2a99/46fb626a58d376fb1491ca8bf8fb4f68dcf9.pdf"}, {"id": "38c61c11554135e09a2353afa536d010c7a53cbb", "title": "Learning the Detection of Faces in Natural Images", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/38c6/1c11554135e09a2353afa536d010c7a53cbb.pdf"}, {"id": "6bfb0f8dd1a2c0b44347f09006dc991b8a08559c", "title": "Multiview discriminative learning for age-invariant face recognition", "addresses": [{"address": "Lomonosov Moscow State University", "lat": "55.70229715", "lng": "37.53179777", "type": "edu"}, {"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2013, "pdf": "https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf"}, {"id": "d33b26794ea6d744bba7110d2d4365b752d7246f", "title": "Transfer Feature Representation via Multiple Kernel Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d33b/26794ea6d744bba7110d2d4365b752d7246f.pdf"}, {"id": "55f94957f753e74f6f0170a45dee746c5b013edb", "title": "Face Recognition Using Balanced Pairwise Classifier Training", "addresses": [{"address": "University of Kent", "lat": "51.29753440", "lng": "1.07296165", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/55f9/4957f753e74f6f0170a45dee746c5b013edb.pdf"}, {"id": "6dbe76f51091ca6a626a62846a946ce687c3dbe8", "title": "INCREMENTAL OBJECT MATCHING WITH PROBABILISTIC METHODS Doctoral dissertation", "addresses": [{"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/6dbe/76f51091ca6a626a62846a946ce687c3dbe8.pdf"}, {"id": "314ad104401c78a83cfe8018412b6a2f33340fc6", "title": "Privacy protecting, intelligibility preserving video surveillance", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2016, "pdf": "http://www.eurecom.fr/fr/publication/4966/download/sec-publi-4966.pdf"}, {"id": "08f6ad0a3e75b715852f825d12b6f28883f5ca05", "title": "Face recognition: Some challenges in forensics", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": 
"-84.47791571", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "102cfd088799405d47c824735dc1356e5835dce7", "title": "Learning-based Face Synthesis for Pose-Robust Recognition from Single Image", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/d5d0/d25663ec0ff8099e613d2278f8a673b9729f.pdf"}, {"id": "fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d", "title": "An experimental study on content-based face annotation of photos", "addresses": [{"address": "National Taiwan Normal University", "lat": "25.00823205", "lng": "121.53577153", "type": "edu"}], "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084"}, {"id": "2910fcd11fafee3f9339387929221f4fc1160973", "title": "Evaluating Open-Universe Face Identification on the Web", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf"}, {"id": "e0ea8ef91bd0a35aec31c9a493137163b4f042b6", "title": "Sparse representation with nearest subspaces for face recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/e0ea/8ef91bd0a35aec31c9a493137163b4f042b6.pdf"}, {"id": "29639a071f67a6867000b53bcb97b37b3d090319", "title": "Gait Identification Considering Body Tilt by Walking Direction Changes", "addresses": [{"address": "Osaka University", "lat": "34.80809035", "lng": "135.45785218", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/2963/9a071f67a6867000b53bcb97b37b3d090319.pdf"}, {"id": "b161d261fabb507803a9e5834571d56a3b87d147", "title": "Gender recognition from face images using a geometric descriptor", "addresses": [{"address": "University of Campinas (UNICAMP)", "lat": "-22.81483740", "lng": "-47.06477080", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913"}, {"id": "5539c0bee8fcf825e63a1abaa950615ebd9c6b49", "title": "Car Detection and Recognition Based on Rear View and Back Light Features", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/5539/c0bee8fcf825e63a1abaa950615ebd9c6b49.pdf"}, {"id": "691463f3f7acb0502e21b40958c1ecdee16d1fe0", "title": "Adaptive Markov Random Fields for Example-Based Super-resolution of Faces", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 
2006, "pdf": "http://pdfs.semanticscholar.org/eb46/25ad9143196021c3def560d025d346c46909.pdf"}, {"id": "0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112", "title": "Patch-based models for visual object classes", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf"}, {"id": "82d5e927c4f1429c07552bfc7bebd5f0e3f2f444", "title": "Histogram Sequence of Local Gabor Binary Pattern for Face Description and Identification", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/82d5/e927c4f1429c07552bfc7bebd5f0e3f2f444.pdf"}, {"id": "e9a8a88b47d0bc20579f39eba1c380b07edc244f", "title": "Effects of the Facial and Racial Features on Gender Classification", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/e9a8/a88b47d0bc20579f39eba1c380b07edc244f.pdf"}, {"id": "982fcead58be419e4f34df6e806204674a4bc579", "title": "Performance improvement of face recognition algorithms using occluded-region detection", "addresses": [{"address": "Azbil Corporation, Kawana, Japan", "lat": "35.33414870", "lng": "139.49433560", "type": "company"}, {"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012"}, {"id": "61f4429c085e8a93c4d7bdb9bff6fac38e58e5c6", "title": "Discriminant Neighborhood Structure Embedding Using Trace Ratio Criterion for Image Recognition", "addresses": [{"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/61f4/429c085e8a93c4d7bdb9bff6fac38e58e5c6.pdf"}, {"id": "ec645bbc34d3ed264516df8b1add4d0cd6c35631", "title": "An improved Bayesian face recognition algorithm in PCA subspace", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/ec64/5bbc34d3ed264516df8b1add4d0cd6c35631.pdf"}, {"id": "3356074f4896bf2af7f46749fdc212a99d4932a6", "title": "Learning Low-Rank Class-Specific Dictionary and Sparse Intra-Class Variant Dictionary for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/3356/074f4896bf2af7f46749fdc212a99d4932a6.pdf"}, {"id": "7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a", "title": "Multi-subregion based probabilistic approach toward pose-invariant face recognition", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/7f1f/3d7b1a4e7fc895b77cb23b1119a6f13e4d3a.pdf"}, {"id": "1ab19e516b318ed6ab64822efe9b2328836107a4", "title": "Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation", "addresses": [{"address": "Mando Corp.", "lat": "35.90775700", "lng": "127.76692200", "type": "company"}, {"address": "Samsung", "lat": "37.56653500", "lng": "126.97796920", 
"type": "company"}, {"address": "Samsung SAIT, Beijing", "lat": "39.90419990", "lng": "116.40739630", "type": "company"}], "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2010.2083674"}, {"id": "e506cdb250eba5e70c5147eb477fbd069714765b", "title": "Heterogeneous Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf"}, {"id": "97dbcc592ed048db545c6e9ed1f27372e8d1d4b8", "title": "Omnidirectional Gait Identification by Tilt Normalization and Azimuth View Transformation", "addresses": [{"address": "Osaka University", "lat": "34.80809035", "lng": "135.45785218", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/97db/cc592ed048db545c6e9ed1f27372e8d1d4b8.pdf"}, {"id": "50b40ec042047b4292fd9b650969d4efbd20c9ed", "title": "Optimal gradient pursuit for face alignment", "addresses": [{"address": "GE Global Research Center", "lat": "42.82982480", "lng": "-73.87719385", "type": "edu"}], "year": 2011, "pdf": "http://cse.msu.edu/~liuxm/publication/Liu_GradientPursuit_FG2011.pdf"}, {"id": "7b47eb8faaf9c2275cdc70299b850ed649ceec62", "title": "1D-LDA vs. 2D-LDA: When is vector-based linear discriminant analysis better than matrix-based?", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/7b47/eb8faaf9c2275cdc70299b850ed649ceec62.pdf"}, {"id": "38e7f3fe450b126367ec358be9b4cc04e82fa8c7", "title": "Maximal Likelihood Correspondence Estimation for Face Recognition Across Pose", "addresses": [{"address": "OMRON Corporation, Kyoto, Japan", "lat": "35.01163630", "lng": "135.76802940", "type": "company"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2351265"}, {"id": "ffa23a8c988e57cf5fc21b56b522a4ee68f2f362", "title": "Social game retrieval from unstructured videos", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/ffa2/3a8c988e57cf5fc21b56b522a4ee68f2f362.pdf"}, {"id": "307c5c0a61e318a65bd65af694ce89c275fd7299", "title": "Face Mis-alignment Analysis by Multiple-Instance Subspace", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/307c/5c0a61e318a65bd65af694ce89c275fd7299.pdf"}, {"id": "43b6fb3146cb92bc36a2aab1368d8665af106a87", "title": "ASePPI, an adaptive scrambling enabling privacy protection and intelligibility in H.264/AVC", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.23919/EUSIPCO.2017.8081347"}, {"id": "4b605e6a9362485bfe69950432fa1f896e7d19bf", "title": "A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf"}, {"id": "2cdf5952b5a1bea5d24917aa2f3fc2ee33568e9a", "title": 
"Autoencoding the retrieval relevance of medical images", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}], "year": 2015, "pdf": "https://arxiv.org/pdf/1507.01251v1.pdf"}, {"id": "2a392cbdb2ac977ad9f969659111e20bd0e9611f", "title": "Supplementary Material for Privacy Preserving Optics for Miniature Vision Sensors", "addresses": [{"address": "University of Florida", "lat": "29.63287840", "lng": "-82.34901330", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/2a39/2cbdb2ac977ad9f969659111e20bd0e9611f.pdf"}, {"id": "7d61b70d922d20c52a4e629b09465076af71ddfd", "title": "Nonnegative class-specific entropy component analysis with adaptive step search criterion", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1007/s10044-011-0258-2"}, {"id": "bde276015ba6677f0ec5fbfc97d5c57daca9d391", "title": "An Evaluation of Face and Ear Biometrics", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/bde2/76015ba6677f0ec5fbfc97d5c57daca9d391.pdf"}, {"id": "856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b", "title": "Image-to-Set Face Recognition Using Locality Repulsion Projections and Sparse Reconstruction-Based Similarity Measure", "addresses": [{"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014"}, {"id": "759a3b3821d9f0e08e0b0a62c8b693230afc3f8d", "title": "Attribute and simile classifiers for face verification", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2009, "pdf": "http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf"}, {"id": "871e6c1de2e0ba86bad8975b8411ad76a6a9aef9", "title": "Geometric Modeling of 3D-Face Features and Its Applications", "addresses": [{"address": "Indian Institute of Technology Kanpur", "lat": "26.51318800", "lng": "80.23651945", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/871e/6c1de2e0ba86bad8975b8411ad76a6a9aef9.pdf"}, {"id": "8aa85d2f81d7496cf7105ee0a3785f140ddaa367", "title": "Efficient processing of MRFs for unconstrained-pose face recognition", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2013, "pdf": "http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2019/PID2859743.pdf"}, {"id": "674e739709537f0e562b6cf114f15a5cc57fde7e", "title": "Nonsubsampled Contourlet Transform Based Descriptors for Gender Recognition", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2014, "pdf": "http://www.cse.unr.edu/~bebis/CGIV2014.pdf"}, {"id": "062cea54e5d58ee41aea607cbf2ba0cf457aa4e7", "title": "The DIARETDB1 Diabetic Retinopathy Database and Evaluation Protocol", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": 
"-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/062c/ea54e5d58ee41aea607cbf2ba0cf457aa4e7.pdf"}, {"id": "555f75077a02f33a05841f9b63a1388ec5fbcba5", "title": "A Survey on Periocular Biometrics Research", "addresses": [{"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1810.03360.pdf"}, {"id": "892db59add66fc581ae1a7338ff8bd6b7aa0f2b4", "title": "FPGA-based Normalization for Modified Gram-Schmidt Orthogonalization", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/892d/b59add66fc581ae1a7338ff8bd6b7aa0f2b4.pdf"}, {"id": "019f1462c1b7101100334e4c421d35feea612492", "title": "Running Head : UNFAMILIAR FACE MATCHING The Effects of External Features and Time Pressure on Unfamiliar Face Matching", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/019f/1462c1b7101100334e4c421d35feea612492.pdf"}, {"id": "10c79df4f44b5e4c08f984f34370d292f31ef309", "title": "Multi-Modal 2D and 3D Biometrics for Face Recognition", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/10c7/9df4f44b5e4c08f984f34370d292f31ef309.pdf"}, {"id": "3514f66f155c271981a734f1523572edcd8fd10e", "title": "A complementary local feature descriptor for face identification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "University of Campinas", "lat": "-27.59539950", "lng": "-48.61542180", "type": "edu"}], "year": 2012, "pdf": "http://www.umiacs.umd.edu/~jhchoi/paper/wacv2012_slide.pdf"}, {"id": "aa4d1ad6fd2dbc05139b8121b500c2b1f6b35bec", "title": "Grassmann Registration Manifolds for Face Recognition", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/aa4d/1ad6fd2dbc05139b8121b500c2b1f6b35bec.pdf"}, {"id": "c79cf7f61441195404472102114bcf079a72138a", "title": "Pose-Invariant 2 D Face Recognition by Matching Using Graphical Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf"}, {"id": "1acf8970598bb2443fd2dd42ceeca1eb3f2fc613", "title": "Boosting Statistical Local Feature Based Classifiers for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2005", "pdf": "https://pdfs.semanticscholar.org/1acf/8970598bb2443fd2dd42ceeca1eb3f2fc613.pdf"}, {"id": "a489a7951c7848ebae5a99ac590c016359a85434", "title": "Attribute-Guided Sketch Generation", "addresses": [{"address": "University of Trento", "lat": "46.06588360", "lng": "11.11598940", "type": "edu"}, {"address": "Huazhong University of Science and Technology", "lat": "30.50975370", "lng": "114.40628810", "type": "edu"}, {"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2019", "pdf": 
"https://arxiv.org/pdf/1901.09774.pdf"}, {"id": "0b55b31765f101535eac0d50b9da377f82136d2f", "title": "Biometric binary string generation with detection rate optimized bit allocation", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2008, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/163.pdf"}, {"id": "bd7477c250f01f63f438c4f3bebe374caf4b86ba", "title": "Real-time Face and Hand Detection for Videoconferencing on a Mobile Device", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/bd74/77c250f01f63f438c4f3bebe374caf4b86ba.pdf"}, {"id": "9039b8097a78f460db9718bc961fdc7d89784092", "title": "3D Face Recognition Based on Local Shape Patterns and Sparse Representation Classifier", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/9039/b8097a78f460db9718bc961fdc7d89784092.pdf"}, {"id": "ee458bee26e6371f9347b1972bbc9dc26b2f3713", "title": "Stacking-based deep neural network: Deep analytic network on convolutional spectral histogram features", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.01396.pdf"}, {"id": "80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7", "title": "Learning Kernel Extended Dictionary for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TNNLS.2016.2522431"}, {"id": "2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc", "title": "Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf"}, {"id": "778c1e95b6ea4ccf89067b83364036ab08797256", "title": "Exploring Patterns of Gradient Orientations and Magnitudes for Face Recognition", "addresses": [{"address": "VESALIS SAS, France", "lat": "45.75976430", "lng": "3.13102130", "type": "company"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIFS.2012.2224866"}, {"id": "b9504e4a2f40f459b5e83143e77f4972c7888445", "title": "Experimental Analysis of Face Recognition on Still and CCTV Images", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2008, "pdf": "http://conradsanderson.id.au/pdfs/chen_avss_2008.pdf"}, {"id": "1da1299088a6bf28167c58bbd46ca247de41eb3c", "title": "Face identification from a single example image based on Face-Specific Subspace (FSS)", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2002, "pdf": "https://doi.org/10.1109/ICASSP.2002.5745055"}, {"id": "3ca25a9e906b851df01a53f4443d66978a0243b8", "title": "Improved Super-Resolution through Residual Neighbor Embedding", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 
2006, "pdf": "http://pdfs.semanticscholar.org/3ca2/5a9e906b851df01a53f4443d66978a0243b8.pdf"}, {"id": "64fd48fae4d859583c4a031b51ce76ecb5de614c", "title": "Illuminated face normalization technique by using wavelet fusion and local binary patterns", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}, {"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2008, "pdf": "https://doi.org/10.1109/ICARCV.2008.4795556"}, {"id": "70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e", "title": "Elastic preserving projections based on L1-norm maximization", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-5608-2"}, {"id": "9cda3e56cec21bd8f91f7acfcefc04ac10973966", "title": "Periocular biometrics: databases, algorithms and directions", "addresses": [{"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IWBF.2016.7449688"}, {"id": "569008018f0b9c4abb8b5c662a6710a1fc38b5a6", "title": "Face Similarity Space as Perceived by Humans and Artificial Systems", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/5690/08018f0b9c4abb8b5c662a6710a1fc38b5a6.pdf"}, {"id": "76dff7008d9b8bf44ec5348f294d5518877c6182", "title": "Discrete area filters in accurate detection of faces and facial features", "addresses": [{"address": "Warsaw University of Technology", "lat": "52.22165395", "lng": "21.00735776", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1016/j.imavis.2014.09.004"}, {"id": "4bc2352b087bdc99ef5f00453e5d2272d522524c", "title": "Investigating the Impact of Face Categorization on Recognition Performance", "addresses": [{"address": "University of Nevada", "lat": "39.54694490", "lng": "-119.81346566", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/4bc2/352b087bdc99ef5f00453e5d2272d522524c.pdf"}, {"id": "6250781bb606041fdc1621ba08aee541bfb1285b", "title": "Ear Biometrics Using 2D and 3D Images", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2005, "pdf": "http://www.cse.nd.edu/Reports/2004/TR-2004-31.pdf"}, {"id": "f6fa68847e0ce7fda05a9c73ebcb484f0b42a9af", "title": "Face Recognition Across Pose and Illumination", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/f6fa/68847e0ce7fda05a9c73ebcb484f0b42a9af.pdf"}, {"id": "3a34c622c1af4b181e99d4a58f7870314944d2c4", "title": "D View - Invariant Face Recognition Using a Hierarchical Pose - Normalization Strategy", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3a34/c622c1af4b181e99d4a58f7870314944d2c4.pdf"}, {"id": "ece80165040e9d8304c5dd808a6cdb29c8ecbf5b", "title": "Looking at People Using Partial Least Squares", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}], "year": "2010", "pdf": 
"https://pdfs.semanticscholar.org/a2f6/8e5898364ac7c1d4691d23fab716ad672712.pdf"}, {"id": "ae1de0359f4ed53918824271c888b7b36b8a5d41", "title": "Low-cost Automatic Inpainting for Artifact Suppression in Facial Images", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf"}, {"id": "1e46d0714398904e557f27022908121fa8a7902f", "title": "Baseline Evaluations on the CAS-PEAL-R1 Face Database", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/1e46/d0714398904e557f27022908121fa8a7902f.pdf"}, {"id": "bec31269632c17206deb90cd74367d1e6586f75f", "title": "Large-scale Datasets: Faces with Partial Occlusions and Pose Variations in the Wild", "addresses": [{"address": "Wayne State University", "lat": "42.35775700", "lng": "-83.06286711", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/bec3/1269632c17206deb90cd74367d1e6586f75f.pdf"}, {"id": "e08038b14165536c52ffe950d90d0f43be9c8f15", "title": "Smart Augmentation Learning an Optimal Data Augmentation Strategy", "addresses": [{"address": "National University of Ireland Galway", "lat": "53.27639715", "lng": "-9.05829961", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.08383.pdf"}, {"id": "edf01e1c84e2f80500fd74da69f428617f2a1665", "title": "Gender recognition from faces using bandlet and local binary patterns", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2013, "pdf": "http://www.cse.unr.edu/~bebis/IWSSIP2013.pdf"}, {"id": "6b3e360b80268fda4e37ff39b7f303e3684e8719", "title": "FACE RECOGNITION FROM SKETCHES USING ADVANCED CORRELATION FILTERS USING HYBRID EIGENANALYSIS FOR FACE SYNTHESIS", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2006", "pdf": null}, {"id": "987feaa36f3bb663ac9fa767718c6a90ea0dab3f", "title": "A Distributed System for Supporting Spatio-temporal Analysis on Large-scale Camera Networks", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Stuttgart", "lat": "48.90953380", "lng": "9.18318920", "type": "edu"}, {"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/987f/eaa36f3bb663ac9fa767718c6a90ea0dab3f.pdf"}, {"id": "2feb7c57d51df998aafa6f3017662263a91625b4", "title": "Feature Selection for Intelligent Transportation Systems", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/d344/9eaaf392fd07b676e744410049f4095b4b5c.pdf"}, {"id": "c3558f67b3f4b618e6b53ce844faf38240ee7cd7", "title": "Collaboratively Weighting Deep and Classic Representation via $l_2$ Regularization for Image Classification", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}, {"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}, {"address": "Jiangsu University", "lat": "32.20302965", "lng": "119.50968362", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.07589.pdf"}, 
{"id": "a7678cce6bfca4a34feee5564c87c80fe192a0fd", "title": "The Weakly Identifying System for Doorway Monitoring", "addresses": [{"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/a767/8cce6bfca4a34feee5564c87c80fe192a0fd.pdf"}, {"id": "550289407a642e81e1ef9dc0476117ed7816e9b5", "title": "Conditional Infomax Learning: An Integrated Framework for Feature Extraction and Fusion", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Microsoft Research Asia", "lat": "39.97721700", "lng": "116.33763200", "type": "company"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5502/89407a642e81e1ef9dc0476117ed7816e9b5.pdf"}, {"id": "6577d30abd8bf5b21901572504bd82101a7eed75", "title": "Ear Biometrics in Human", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/6577/d30abd8bf5b21901572504bd82101a7eed75.pdf"}, {"id": "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "title": "Face , Age and Gender Recognition using Local Descriptors", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf"}, {"id": "885c37f94e9edbbb2177cfba8cb1ad840b2a5f20", "title": "Simultaneous Local Binary Feature Learning and Encoding for Homogeneous and Heterogeneous Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255"}, {"id": "279acfde0286bb76dd7717abebc3c8acf12d2c5f", "title": "Local Gradient Order Pattern for Face Representation and Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/ICPR2014/Lei-ICPR-14.pdf"}, {"id": "17f472a7cb25bf1e76ff29181b1d40585e2ae5c1", "title": "Fusing binary templates for multi-biometric cryptosystems", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358764"}, {"id": "cf671dc13696d1643cc1f32f7d32c329b16cd745", "title": "Multiple Fisher Classifiers Combination for Face Recognition based on Grouping AdaBoosted Gabor Features", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/cf67/1dc13696d1643cc1f32f7d32c329b16cd745.pdf"}, {"id": "b53485dbdd2dc5e4f3c7cff26bd8707964bb0503", "title": "Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1012-z"}, {"id": "9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03", "title": "A review on Gabor wavelets for face recognition", "addresses": [{"address": "University of 
Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2006, "pdf": "https://doi.org/10.1007/s10044-006-0033-y"}, {"id": "d103df0381582003c7a8930b68047b4f26d9b613", "title": "Quality Assessment and Restoration of Face Images in Long Range/High Zoom Video", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/d103/df0381582003c7a8930b68047b4f26d9b613.pdf"}, {"id": "56fb30b24e7277b47d366ca2c491749eee4d6bb1", "title": "Using Bayesian statistics and Gabor Wavelets for recognition of human faces", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICAPR.2015.7050658"}, {"id": "963a004e208ce4bd26fa79a570af61d31651b3c3", "title": "Computational methods for modeling facial aging: A survey", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1016/j.jvlc.2009.01.011"}, {"id": "4a18adc7f5a090a041528a88166671248703f6e0", "title": "Illumination Normalization for Robust Face Recognition Against Varying Lighting Conditions", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/c2c3/ecd39dd24e2b57ae6023536cc1fcd29d184a.pdf"}, {"id": "301662c2a6ed86e48f21c1d24bfc67b403201b0c", "title": "Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}, {"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/688d/0dddf90995ba6248de148e58030cb8f558e8.pdf"}, {"id": "c48b2582429cc9ae427a264eed469d08b571acde", "title": "Facial Peculiarity Retrieval via Deep Neural Networks Fusion", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c48b/2582429cc9ae427a264eed469d08b571acde.pdf"}, {"id": "878ec66a3bb87f23f3f8fd96ee504f79e6100a95", "title": "THESIS EVALUATING THE PERFORMANCE OF IPHOTO FACIAL RECOGNITION AT THE BIOMETRIC VERIFICATION TASK", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/878e/c66a3bb87f23f3f8fd96ee504f79e6100a95.pdf"}, {"id": "124f6992202777c09169343d191c254592e4428c", "title": "Visual Psychophysics for Making Face Recognition Algorithms More Explainable", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}, {"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.07140.pdf"}, {"id": "4156f9fc5983b09eb97ad3d9abc248b15440b955", "title": "2 Subspace Methods for Face Recognition : Singularity , Regularization , and Robustness", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": 
"edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4156/f9fc5983b09eb97ad3d9abc248b15440b955.pdf"}, {"id": "533d70c914a4b84ec7f35ef6c74bb3acba4c26fc", "title": "Blaming the victims of your own mistakes: How visual search accuracy influences evaluation of stimuli.", "addresses": [{"address": "University of Iceland", "lat": "64.13727400", "lng": "-21.94561454", "type": "edu"}, {"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/533d/70c914a4b84ec7f35ef6c74bb3acba4c26fc.pdf"}, {"id": "ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c", "title": "Symmetrical Two-Dimensional PCA with Image Measures in Face Recognition", "addresses": [{"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ccd7/a6b9f23e983a3fc6a70cc3b9c9673d70bf2c.pdf"}, {"id": "60a006bdfe5b8bf3243404fae8a5f4a9d58fa892", "title": "A reference-based framework for pose invariant face recognition", "addresses": [{"address": "University of North Carolina at Chapel Hill", "lat": "35.91139710", "lng": "-79.05045290", "type": "edu"}], "year": 2015, "pdf": "http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf"}, {"id": "19fed85436eff43e60b9476e3d8742dfedba6384", "title": "A Novel Multiple Kernel Sparse Representation based Classification for Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/19fe/d85436eff43e60b9476e3d8742dfedba6384.pdf"}, {"id": "244c5f88186475bc3b051be8ebb6422e4b8de707", "title": "Video from nearly still: An application to low frame-rate gait recognition", "addresses": [{"address": "Osaka University", "lat": "34.80809035", "lng": "135.45785218", "type": "edu"}], "year": 2012, "pdf": "http://www.am.sanken.osaka-u.ac.jp/~mansur/files/cvpr2012.pdf"}, {"id": "977bedd692c240c162481ef769b31e0f5455469a", "title": "A Two-Step Approach to Hallucinating Faces: Global Parametric Model and Local Nonparametric Model", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/977b/edd692c240c162481ef769b31e0f5455469a.pdf"}, {"id": "9e1c3c7f1dce662a877727a821bdf41c5cd906bb", "title": "Learning Disentangling and Fusing Networks for Face Completion Under Structured Occlusions", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9e1c/3c7f1dce662a877727a821bdf41c5cd906bb.pdf"}, {"id": "4308f53244bbb6a1e22ba1d39e079e5065a51364", "title": "Ethnicity Identification from Face Images", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/4308/f53244bbb6a1e22ba1d39e079e5065a51364.pdf"}, {"id": "5b1f3a60518c3a552de09ed51646764551f4cb84", "title": "Multiple cue integration in transductive confidence machines for head pose classification", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2008, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/121.pdf"}, {"id": "a96a7a381872ae40179ded0d79f905da0455d9d1", "title": 
"Segmentation of Saimaa Ringed Seals for Identification Purposes", "addresses": [{"address": "Monash University Malaysia", "lat": "3.06405715", "lng": "101.60059740", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/a96a/7a381872ae40179ded0d79f905da0455d9d1.pdf"}, {"id": "9a7fcd09afd8c3ae227e621795168c94ffbac71d", "title": "Action unit recognition transfer across datasets", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2011, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/2011-WuEtAl-FERA-DatasetTransfer.pdf"}, {"id": "f2d813a987f0aed5056d5eccbadee8738bbd0a4b", "title": "Fast Matching by 2 Lines of Code for Large Scale Face Recognition Systems", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f2d8/13a987f0aed5056d5eccbadee8738bbd0a4b.pdf"}, {"id": "8489236bbbb3298f4513c7e005a85ba7a48cc946", "title": "Vision and Touch for Grasping", "addresses": [{"address": "Ruhr-University Bochum", "lat": "51.44415765", "lng": "7.26096541", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/8489/236bbbb3298f4513c7e005a85ba7a48cc946.pdf"}, {"id": "1dede3e0f2e0ed2984aca8cd98631b43c3f887b9", "title": "A vote of confidence based interest point detector", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2013, "pdf": "http://www3.ntu.edu.sg/home/EXDJiang/ICASSP13-3.pdf"}, {"id": "4c56f119ebf7c71f2a83e4d79e8d88314b8e6044", "title": "An other-race effect for face recognition algorithms", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2011, "pdf": "http://www.nist.gov/customcf/get_pdf.cfm?pub_id=906254"}, {"id": "1b67053c682dcbc9dc368de89fff32f787320a96", "title": "Quality-Driven Face Occlusion Detection and Recovery", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2007, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2007/CVPR07_face01.pdf"}, {"id": "86274e426bfe962d5cb994d5d9c6829f64410c32", "title": "Face Recognition in Different Subspaces: A Comparative Study", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/8627/4e426bfe962d5cb994d5d9c6829f64410c32.pdf"}, {"id": "4c170a0dcc8de75587dae21ca508dab2f9343974", "title": "FaceTracer: A Search Engine for Large Collections of Images with Faces", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf"}, {"id": "4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99", "title": "Face Recognition From Video", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/4276/eb27e2e4fc3e0ceb769eca75e3c73b7f2e99.pdf"}, {"id": "63f9f3f0e1daede934d6dde1a84fb7994f8929f0", "title": "Local Gabor binary pattern histogram sequence (LGBPHS): a novel non-statistical model for face representation and 
recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2005, "pdf": "http://www.jdl.ac.cn/user/sgshan/pub/ICCV2005-ZhangShan-LGBP.pdf"}, {"id": "39e1fb5539737a17ae5fc25de30377dfaecfa100", "title": "Appearance-based face recognition and light-fields", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2004, "pdf": "https://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2004_1/gross_ralph_2004_1.pdf"}, {"id": "e19ba2a6ce70fb94d31bb0b39387aa734e6860b0", "title": "A Different Approach to Appearance \u2013based Statistical Method for Face Recognition Using Median", "addresses": [{"address": "Anna University", "lat": "13.01058380", "lng": "80.23537360", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/e19b/a2a6ce70fb94d31bb0b39387aa734e6860b0.pdf"}, {"id": "528a6698911ff30aa648af4d0a5cf0dd9ee90b5c", "title": "Is All Face Processing Holistic ? The View from UCSD", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Iowa", "lat": "41.66590000", "lng": "-91.57310307", "type": "edu"}], "year": "2003", "pdf": "https://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf"}, {"id": "4c5566d4cb47f4db45d46c6aaf324d6057b580bc", "title": "Gender recognition from face images with trainable COSFIRE filters", "addresses": [{"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738068"}, {"id": "462fe97ce53e58c8e2cb01c925b46bcf3bb53eda", "title": "How features of the human face affect recognition: a statistical comparison of three face recognition algorithms", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": 2004, "pdf": "http://www.cs.colostate.edu/~draper/papers/givens_cvpr04.pdf"}, {"id": "c95e379aab32a1611f1f549fd11a3e9498ab5dae", "title": "Constructing Benchmark Databases and Protocols for Medical Image Analysis: Diabetic Retinopathy", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}, {"address": "University of Tampere", "lat": "61.49412325", "lng": "23.77920678", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c95e/379aab32a1611f1f549fd11a3e9498ab5dae.pdf"}, {"id": "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "title": "Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches", "addresses": [{"address": "Aberystwyth University", "lat": "52.41073580", "lng": "-4.05295501", "type": "edu"}, {"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf"}, {"id": "5bb9540375ba9bba22f8a22ba2990cfe7ff6780c", "title": "Discriminant Analysis of Haar Features for Accurate Eye Detection", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/5bb9/540375ba9bba22f8a22ba2990cfe7ff6780c.pdf"}, {"id": "01a19d3e902d7431f533f5f0b54510a7fb9bda23", "title": "A Practical Face Relighting Method for 
Directional Lighting Normalization", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3521/15bbb399b94865a7d870d1cd1a79e42104b8.pdf"}, {"id": "022f38febc47818a010dc64ca54f6e137055cc88", "title": "3D face texture modeling from uncalibrated frontal and profile images", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2012, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/HanJain_3DFaceTextureModeling_UncalibratedFrontalProfileImages_BTAS12.pdf"}, {"id": "b13014374863715c421ed92d3827fc7e09a3e47a", "title": "Rapid Correspondence Finding in Networks of Cortical Columns", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/fe31/8312fd51fc65d132084c3862c85f067e6edf.pdf"}, {"id": "8a55c385c8cf76cadaa28c7ab1fde9dc28577b08", "title": "Positive definite dictionary learning for region covariances", "addresses": [{"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": 2011, "pdf": "http://www-users.cs.umn.edu/~boley/publications/papers/ICCV2011.pdf"}, {"id": "d40cd10f0f3e64fd9b0c2728089e10e72bea9616", "title": "Enhancing Face Identification Using Local Binary Patterns and K-Nearest Neighbors", "addresses": [{"address": "Hangzhou Dianzi University", "lat": "30.31255250", "lng": "120.34309460", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d40c/d10f0f3e64fd9b0c2728089e10e72bea9616.pdf"}]}
\ No newline at end of file +{"id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "paper": {"paper_id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "key": "feret", "title": "The FERET Verification Testing Protocol for Face Recognition Algorithms", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf", "address": "", "name": "FERET"}, "address": null, "additional_papers": [{"paper_id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "key": "feret", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf", "address": "", "name": "FERET"}, {"paper_id": "31de9b3dd6106ce6eec9a35991b2b9083395fd0b", "key": "feret", "title": "FERET (Face Recognition Technology) Recognition Algorithm Development and Test Results", "year": 1996, "pdf": "http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf", "address": "", "name": "FERET"}, {"paper_id": "dc8b25e35a3acb812beb499844734081722319b4", "key": "feret", "title": "The FERET Promising Research database and evaluation procedure for face - recognition algorithms", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf", "address": "", "name": "FERET"}], "citations": [{"id": "919d0e681c4ef687bf0b89fe7c0615221e9a1d30", "title": "Fractal Techniques for Face Recognition", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/919d/0e681c4ef687bf0b89fe7c0615221e9a1d30.pdf"}, {"id": "51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6", "title": "A Survey of Face Detection, Extraction and Recognition", "addresses": [{"address": "Huazhong University of Science and Technology", "lat": "30.50975370", "lng": "114.40628810", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/51ed/4c92cab9336a2ac41fa8e0293c2f5f9bf3b6.pdf"}, {"id": "8aff9c8a0e17be91f55328e5be5e94aea5227a35", "title": "Sparse Tensor Discriminant Color Space for Face Verification", "addresses": [{"address": "Jilin University", "lat": "22.05356500", "lng": "113.39913285", "type": "edu"}, {"address": "Raytheon BBN Technologies", "lat": "42.38980550", "lng": "-71.14759860", "type": "company"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/TNNLS.2012.2191620"}, {"id": "d3b5a52062e5f5415df527705cb24af9b0846617", "title": "Advances and Challenges in 3D and 2D+3D Human Face Recognition", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/d3b5/a52062e5f5415df527705cb24af9b0846617.pdf"}, {"id": "03167776e17bde31b50f294403f97ee068515578", "title": "Chapter 11. 
Facial Expression Analysis", "addresses": [{"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/0316/7776e17bde31b50f294403f97ee068515578.pdf"}, {"id": "658eb1fd14808d10e0f4fee99c5506a1bb0e351a", "title": "Multi-Discriminant Classification Algorithm for Face Verification", "addresses": [{"address": "National Cheng Kung University", "lat": "22.99919160", "lng": "120.21625134", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/658e/b1fd14808d10e0f4fee99c5506a1bb0e351a.pdf"}, {"id": "49570b41bd9574bd9c600e24b269d945c645b7bd", "title": "A Framework for Performance Evaluation of Face Recognition Algorithms", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf"}, {"id": "d65b82b862cf1dbba3dee6541358f69849004f30", "title": "2.5D Elastic graph matching", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/d65b/82b862cf1dbba3dee6541358f69849004f30.pdf"}, {"id": "6d5e12ee5d75d5f8c04a196dd94173f96dc8603f", "title": "Learning a similarity metric discriminatively, with application to face verification", "addresses": [{"address": "Courant Institute of Mathematical Sciences", "lat": "40.72864840", "lng": "-73.99568630", "type": "edu"}, {"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2005, "pdf": "http://www.cs.toronto.edu/~hinton/csc2535_06/readings/chopra-05.pdf"}, {"id": "0e1403f2182609fb64ed72913f7294fea7d02bd6", "title": "Learning Support Vectors for Face Verification and Recognition", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/9457/cdb4b1f4764f70fe86b50e26abc34930f882.pdf"}, {"id": "fe9a6a93af9c32f6b0454a7cf6897409124514bd", "title": "Designing a smart card face verification system", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/fe9a/6a93af9c32f6b0454a7cf6897409124514bd.pdf"}, {"id": "92a3d5ab3eb540a11eddf1b836c1db28640b2746", "title": "Face Recognition using 3D Facial Shape and Color Map Information: Comparison and Combination", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/92a3/d5ab3eb540a11eddf1b836c1db28640b2746.pdf"}, {"id": "23fc83c8cfff14a16df7ca497661264fc54ed746", "title": "Comprehensive Database for Facial Expression Analysis", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": 
"-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "aec46facf3131a5be4fc23db4ebfb5514e904ae3", "title": "Audio to the rescue", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/aec4/6facf3131a5be4fc23db4ebfb5514e904ae3.pdf"}, {"id": "544c06584c95bfdcafbd62e04fb796e575981476", "title": "Human Identification from Body Shape", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/544c/06584c95bfdcafbd62e04fb796e575981476.pdf"}, {"id": "84a74ef8680b66e6dccbc69ae80321a52780a68e", "title": "Facial Expression Recognition", "addresses": [{"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": "2011", "pdf": "http://doi.org/10.1007/978-0-85729-932-1_19"}, {"id": "b3cc2554449fb10002250bbc178e1009fc2fdb70", "title": "Face Recognition Based on Local Zernike Moments", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/b3cc/2554449fb10002250bbc178e1009fc2fdb70.pdf"}, {"id": "fbfb0de017d57c5f282050dadb77797d97785ba5", "title": "Enabling EBGM Face Authentication on mobile devices", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/fbfb/0de017d57c5f282050dadb77797d97785ba5.pdf"}, {"id": "0a602b85c80cef7d38209226188aaab94d5349e8", "title": "THE FLORIDA STATE UNIVERSITY COLLEGE OF ARTS AND SCIENCES AUTOMATED FACE TRACKING AND RECOGNITION By MATTHEW", "addresses": [{"address": "Florida State University", "lat": "30.44235995", "lng": "-84.29747867", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/0a60/2b85c80cef7d38209226188aaab94d5349e8.pdf"}, {"id": "a2bcfba155c990f64ffb44c0a1bb53f994b68a15", "title": "The Photoface database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/CVPRW.2011.5981840"}, {"id": "425833b5fe892b00dcbeb6e3975008e9a73a5a72", "title": "A Review of Performance Evaluation for Biometrics Systems", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/4258/33b5fe892b00dcbeb6e3975008e9a73a5a72.pdf"}, {"id": "7ef44b7c2b5533d00001ae81f9293bdb592f1146", "title": "D\u00e9tection des \u00e9motions \u00e0 partir de vid\u00e9os dans un environnement non contr\u00f4l\u00e9 Detection of emotions from video in non-controlled environment", "addresses": [{"address": "Aalborg University", "lat": "57.01590275", "lng": "9.97532827", "type": "edu"}], "year": "2003", "pdf": "https://pdfs.semanticscholar.org/7ef4/4b7c2b5533d00001ae81f9293bdb592f1146.pdf"}, {"id": "6e968f74fd6b4b3b172c787f298b3d4746ec5cc9", "title": "A 3D Polygonal Line Chains Matching Method for Face Recognition", "addresses": [{"address": 
"Griffith University", "lat": "-27.55339750", "lng": "153.05336234", "type": "edu"}], "year": 2013, "pdf": "http://www.ict.griffith.edu.au/~junzhou/papers/C_DICTA_2013_C.pdf"}, {"id": "3a1c3307f57ef09577ac0dc8cd8b090a4fe8091f", "title": "Thermal-to-visible face recognition using partial least squares.", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/3a1c/3307f57ef09577ac0dc8cd8b090a4fe8091f.pdf"}, {"id": "81a8b2e55bcea9d9b26e67fcbb5a30ca8a8defc3", "title": "Database size effects on performance on a smart card face verification system", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2006, "pdf": "http://multispectral-imagery-lab.sandbox.wvu.edu/files/d/337b61b4-b6af-4c96-8314-c282ebebf299/databasesizeeffectsonperformancesmartcardfaceverification.pdf"}, {"id": "b8b0f0ca35cb02334aaa3192559fb35f0c90f8fa", "title": "Face Recognition in Low-resolution Images by Using Local Zernike Moments", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b8b0/f0ca35cb02334aaa3192559fb35f0c90f8fa.pdf"}, {"id": "76d1c6c6b67e67ced1f19a89a5034dafc9599f25", "title": "Understanding OSN-based facial disclosure against face authentication systems", "addresses": [{"address": "Singapore Management University", "lat": "1.29500195", "lng": "103.84909214", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2590296.2590315"}, {"id": "8a12edaf81fd38f81057cf9577c822eb09ff6fc1", "title": "Measuring and mitigating targeted biometric impersonation", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}, {"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/8a12/edaf81fd38f81057cf9577c822eb09ff6fc1.pdf"}, {"id": "4b86e711658003a600666d3ccfa4a9905463df1c", "title": "Fusion of Appearance Image and Passive Stereo Depth Map for Face Recognition Based on the Bilateral 2DLDA", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2007", "pdf": "https://pdfs.semanticscholar.org/4b86/e711658003a600666d3ccfa4a9905463df1c.pdf"}, {"id": "4b8d80f91d271f61b26db5ad627e24e59955c56a", "title": "Learning Long-Range Vision for an Offroad Robot", "addresses": [{"address": "Courant Institute of Mathematical Sciences", "lat": "40.72864840", "lng": "-73.99568630", "type": "edu"}, {"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/4b8d/80f91d271f61b26db5ad627e24e59955c56a.pdf"}, {"id": "7af15295224c3ad69d56f17ff635763dd008a8a4", "title": "Learning Support Vectors for Face Authentication: Sensitivity to Mis-Registrations", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7af1/5295224c3ad69d56f17ff635763dd008a8a4.pdf"}, {"id": "1e3739716e163fce6fded71eda078a18334aa83b", "title": "The HFB Face Database for Heterogeneous Face Biometrics research", "addresses": [{"address": "Chinese Academy 
of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1109/CVPRW.2009.5204149"}, {"id": "a0d6390dd28d802152f207940c7716fe5fae8760", "title": "Bayesian Face Revisited: A Joint Formulation", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf"}, {"id": "b19ca50a9e2415072a97482005fe0b77a8a495ce", "title": "Hierarchical Direct Appearance Model for Elastic Labeled Graph Localization", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/b19c/a50a9e2415072a97482005fe0b77a8a495ce.pdf"}, {"id": "1b3e66bef13f114943d460b4f942e941b4761ba2", "title": "Subspace Approximation of Face Recognition Algorithms: An Empirical Study", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2008, "pdf": "http://www.nist.gov/customcf/get_pdf.cfm?pub_id=890061"}, {"id": "dbc749490275db26337c7e3201027e8cef8e371c", "title": "Multi-band Gradient Component Pattern (MGCP): A New Statistical Feature for Face Recognition", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/dbc7/49490275db26337c7e3201027e8cef8e371c.pdf"}, {"id": "5bf954ca82b42865c49eef4b064278b82f3b38de", "title": "Re-engaging with the past: recapitulation of encoding operations during episodic retrieval", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}, {"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/80b0/045eed3a1fc9ab502963f6fb3e6f70a2f638.pdf"}, {"id": "ca458f189c1167e42d3a5aaf81efc92a4c008976", "title": "Double Shrinking Sparse Dimension Reduction", "addresses": [{"address": "University of Technology", "lat": "-33.88405040", "lng": "151.19922540", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2012.2202678"}, {"id": "b20a8fc556aed9ab798fcf31e4f971dbc67a9edf", "title": "An Adept Segmentation Algorithm and Its Application to the Extraction of Local Regions Containing Fiducial Points", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/b20a/8fc556aed9ab798fcf31e4f971dbc67a9edf.pdf"}, {"id": "80290f2a38741e20a38de7c00d80353604343ef8", "title": "Eigenfeature Optimization for Face Detection", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/8029/0f2a38741e20a38de7c00d80353604343ef8.pdf"}, {"id": "d1836e137787fadb28d3418e029534765bcf1dae", "title": "Analysis , Synthesis and Recognition of Human Faces with Pose Variations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2001, "pdf": 
"http://pdfs.semanticscholar.org/d183/6e137787fadb28d3418e029534765bcf1dae.pdf"}, {"id": "4a24d41aef0041ef82916d2316eea86f6c45c47f", "title": "Impact of Full Rank Principal Component Analysis on Classification Algorithms for Face Recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4a24/d41aef0041ef82916d2316eea86f6c45c47f.pdf"}, {"id": "7c7fb5c70bdabe8442c46c791fb2db00c490410b", "title": "Human Face Recognition using Gabor based Kernel Entropy Component Analysis", "addresses": [{"address": "Indian Statistical Institute, Kolkata", "lat": "22.64815210", "lng": "88.37681700", "type": "edu"}, {"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/7c7f/b5c70bdabe8442c46c791fb2db00c490410b.pdf"}, {"id": "19e62a56b6772bbd37dfc6b8f948e260dbb474f5", "title": "Cross-Domain Metric Learning Based on Information Theory", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf"}, {"id": "c1cf5dda56c72b65e86f3a678f76644f22212748", "title": "Face Hallucination via Semi-kernel Partial Least Squares", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/c1cf/5dda56c72b65e86f3a678f76644f22212748.pdf"}, {"id": "e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef", "title": "Addressing the illumination challenge in two-dimensional face recognition: a survey", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf"}, {"id": "e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa", "title": "Weakly Supervised Learning for Unconstrained Face Processing", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf"}, {"id": "bc9003ad368cb79d8a8ac2ad025718da5ea36bc4", "title": "Facial expression recognition with a three-dimensional face model", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf"}, {"id": "d6b0a1f6dfb995436b45045b56e966d8e57b0990", "title": "Gait analysis and recognition for automated visual surveillance", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/d6b0/a1f6dfb995436b45045b56e966d8e57b0990.pdf"}, {"id": "02ae77f4c289426f18e83ce6e295d39538fb0fcc", "title": "Dependency Modeling for Information Fusion with Applications in Visual Recognition", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}], "year": 2013, "pdf": 
"http://pdfs.semanticscholar.org/02ae/77f4c289426f18e83ce6e295d39538fb0fcc.pdf"}, {"id": "078549cb5474b024d203f96954646cacef219682", "title": "Single Image Face Recognition based on Gabor, Sobel and Local Ternary Pattern", "addresses": [{"address": "Jahangirnagar University", "lat": "23.88331200", "lng": "90.26939210", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1b42/0d5cf66e60b540ecdb352a287c85d9d7e2a4.pdf"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "1320c42b348c5342c2ad6a60e3ded3ff0bd56f7f", "title": "A Viewpoint Invariant, Sparsely Registered, Patch Based, Face\u00a0Verifier", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2007, "pdf": "https://doi.org/10.1007/s11263-007-0119-z"}, {"id": "17cf6195fd2dfa42670dc7ada476e67b381b8f69", "title": "Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments", "addresses": [{"address": "Chung-Ang University", "lat": "37.50882000", "lng": "126.96190000", "type": "edu"}, {"address": "Korea Electronics Technology Institute", "lat": "37.40391700", "lng": "127.15978600", "type": "edu"}, {"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf"}, {"id": "b5930275813a7e7a1510035a58dd7ba7612943bc", "title": "Face Recognition Using L-Fisherfaces", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}, {"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}, {"address": "Shandong University of Science and Technology", "lat": "36.00146435", "lng": "120.11624057", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf"}, {"id": "281cc188bf7588681cdf8e325b0ed13ac927e2e6", "title": "A Multi-Modal Person Recognition System for Social Robots", "addresses": [{"address": "Fraser University", "lat": "44.96898360", "lng": "-93.20941629", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/281c/c188bf7588681cdf8e325b0ed13ac927e2e6.pdf"}, {"id": "89ac06ccbc410224f4d05d5ae8fa46c4fe3cbe0f", "title": "Video Based Face Verification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/947e/53c1d9035df85a3bc1b852928acbe889daf4.pdf"}, {"id": "72b4b8f4a9f25cac5686231b44a2220945fd2ff6", "title": "Face Verification Using Modeled Eigenspectrum", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/72b4/b8f4a9f25cac5686231b44a2220945fd2ff6.pdf"}, {"id": "001d909eb3513fb6fad8fb2355971441255458c3", "title": "Minimal local reconstruction error measure based discriminant feature extraction and classification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2008, "pdf": 
"http://mplab.ucsd.edu/wordpress/wp-content/uploads/CVPR2008/Conference/data/papers/023.pdf"}, {"id": "95289007f2f336e6636cf8f920225b8d47c6e94f", "title": "Automatic Training Image Acquisition and Effective Feature Selection From Community-Contributed Photos for Facial Attribute Detection", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "Academia Sinica, Taiwan", "lat": "25.04117270", "lng": "121.61465180", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796"}, {"id": "749ebfa344b6d27de898d619cea0b28ad3894ff2", "title": "Predicting Biometric Authentication System Performance Across Different Application Conditions: A Bootstrap Enhanced Parametric Approach", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/749e/bfa344b6d27de898d619cea0b28ad3894ff2.pdf"}, {"id": "e3bb87e858bc752436c7a8da3fca68b2dacbf3e8", "title": "On the Evaluation of Methods for the Recovery of Plant Root Systems from X-ray Computed Tomography Images", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e3bb/87e858bc752436c7a8da3fca68b2dacbf3e8.pdf"}, {"id": "a94cae786d515d3450d48267e12ca954aab791c4", "title": "YawDD: a yawning detection dataset", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf"}, {"id": "8a3bb63925ac2cdf7f9ecf43f71d65e210416e17", "title": "ShearFace: Efficient Extraction of Anisotropic Features for Face Recognition", "addresses": [{"address": "University of Sfax, Tunisia", "lat": "34.73610660", "lng": "10.74272750", "type": "edu"}], "year": 2014, "pdf": "https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf"}, {"id": "998cdde7c83a50f0abac69c7c3d20f3729a65d00", "title": "Redundancy effects in the perception and memory of visual objects", "addresses": [{"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/998c/dde7c83a50f0abac69c7c3d20f3729a65d00.pdf"}, {"id": "31dd6bafd6e7c6095eb8d0591abac3b0106a75e3", "title": "Face Recognition In Unconstrained Environment", "addresses": [{"address": "Khon Kaen University", "lat": "16.46007565", "lng": "102.81211798", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336"}, {"id": "beea33ccd9423d48d6cfb928469bbe7841e63e73", "title": "DIARETDB1 diabetic retinopathy database and evaluation protocol", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/beea/33ccd9423d48d6cfb928469bbe7841e63e73.pdf"}, {"id": "4cf0c6d3da8e20d6f184a4eaa6865d61680982b8", "title": "Face recognition based on 3D mesh model", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/4cf0/c6d3da8e20d6f184a4eaa6865d61680982b8.pdf"}, {"id": "ba6082291b018b14f8da4f96afc631918bad3a1b", "title": "Calibration , Recognition , and Shape from Silhouettes of Stones", "addresses": 
[{"address": "University of Cape Town", "lat": "-33.95828745", "lng": "18.45997349", "type": "edu"}], "year": "2007", "pdf": "https://pdfs.semanticscholar.org/3f5b/0cf2ed392045026ea0d1d67145d0400e516f.pdf"}, {"id": "09ef369754fccb530e658b8331c405867c0d45a6", "title": "Comparison of Face Verification Results on the XM2VTS Database", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}, {"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}, {"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/09ef/369754fccb530e658b8331c405867c0d45a6.pdf"}, {"id": "b1e218046a28d10ec0be3272809608dea378eddc", "title": "Overview of the Multiple Biometrics Grand Challenge", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/12c5/66e2eee7bbaf45b894e7282f87f00f1db20a.pdf"}, {"id": "15122ef718265beb4cb1a74e5d1f41c5edcb4ba5", "title": "On the Euclidean distance of images", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2005.165"}, {"id": "9d6e60d49e92361f8f558013065dfa67043dd337", "title": "Applications of Computational Geometry and Computer Vision", "addresses": [{"address": "Central Washington University", "lat": "47.00646895", "lng": "-120.53673040", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/9d6e/60d49e92361f8f558013065dfa67043dd337.pdf"}, {"id": "121839d3254820b7017b07ef47acc89b975286a9", "title": "Feature Extraction for Incomplete Data via Low-rank Tucker Decomposition", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}, {"address": "Guangdong University of Technology", "lat": "23.13538360", "lng": "113.29470496", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/92a2/5b281f1637d125cefefcbfc382f48f456f4c.pdf"}, {"id": "88ed558bff3600f5354963d1abe762309f66111e", "title": "Real-World and Rapid Face Recognition Toward Pose and Expression Variations via Feature Library Matrix", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Semnan University", "lat": "35.60374440", "lng": "53.43445877", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TIFS.2015.2393553"}, {"id": "016a8ed8f6ba49bc669dbd44de4ff31a79963078", "title": "Face relighting for face recognition under generic illumination", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2004, "pdf": "https://doi.org/10.1109/ICASSP.2004.1327215"}, {"id": "44f48a4b1ef94a9104d063e53bf88a69ff0f55f3", "title": "Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf"}, {"id": "c423b0a0b7232a5cd0c3f4c75164923a3f04cdcd", "title": "Kernel Discriminant 
Learning with Application to Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/c423/b0a0b7232a5cd0c3f4c75164923a3f04cdcd.pdf"}, {"id": "2fd007088a75916d0bf50c493d94f950bf55c5e6", "title": "Projective Representation Learning for Discriminative Face Recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/978-981-10-7302-1_1"}, {"id": "0cb613bf519b90d08d2f12623b41f02c638cea63", "title": "Face annotation for personal photos using context-assisted face recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2008, "pdf": "http://koasas.kaist.ac.kr/bitstream/10203/22675/1/Face%20Annotation%20for%20Personal%20Photos%20Using%20Context%20Assisted%20Face%20Recognition.pdf"}, {"id": "30b6811205b42e92d7a82c606d4521319764250b", "title": "Low cost illumination invariant face recognition by down-up sampling self quotient image", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/APSIPA.2013.6694367"}, {"id": "ff69da3510f5ffed224069faf62036e1aa9b6d26", "title": "Extended Set of Local Binary Patterns for Rapid Object Detection", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/a256/3501ffd5a840fa4df0f3911a82e117df2f7f.pdf"}, {"id": "c207fd762728f3da4cddcfcf8bf19669809ab284", "title": "Face Alignment Using Boosting and Evolutionary Search", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf"}, {"id": "23a450a075d752f1ec2b1e5e225de13d3bc37636", "title": "Subspace Learning in Krein Spaces: Complete Kernel Fisher Discriminant Analysis with Indefinite Kernels", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/23a4/50a075d752f1ec2b1e5e225de13d3bc37636.pdf"}, {"id": "521c2e9892eb22f65ba5b0d4c8d2f4c096d9fdf3", "title": "Model-Based Face De-Identification", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2006, "pdf": "http://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2006_2/gross_ralph_2006_2.pdf"}, {"id": "91e507d2d8375bf474f6ffa87788aa3e742333ce", "title": "Robust Face Recognition Using Probabilistic Facial Trait Code", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf"}, {"id": "db3e78704df982b2af92282e4a74aa3b59ea3a2e", "title": "A recurrent dynamic model for correspondence-based face recognition.", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": "2008", "pdf": 
"https://pdfs.semanticscholar.org/1e69/9d9e0470c5d39ff78eaf21b394a90691c513.pdf"}, {"id": "744b794f0047b008c517752fc9bb1100e5f120cc", "title": "Multiple-exemplar discriminant analysis for face recognition", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333736"}, {"id": "44d93039eec244083ac7c46577b9446b3a071f3e", "title": "Empirical comparisons of several preprocessing methods for illumination insensitive face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2005", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571"}, {"id": "2d435b7510eeda648dc34d5b8ac921499d525218", "title": "Improving Variance Estimation in Biometric Systems", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383395"}, {"id": "ab7bcbaa9e77d35634302b021d47e7889628a88d", "title": "FACESKETCHID: A SYSTEM FOR FACIAL SKETCH TO MUGSHOT MATCHING by Scott", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ab7b/cbaa9e77d35634302b021d47e7889628a88d.pdf"}, {"id": "92017bf2df5f6532d39c624ea209f37bb6728097", "title": "Attention Driven Face Recognition, Learning from Human Vision System", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/9201/7bf2df5f6532d39c624ea209f37bb6728097.pdf"}, {"id": "841bf196ee0086c805bd5d1d0bddfadc87e424ec", "title": "Locally Kernel-based Nonlinear Regression for Face Recognition", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf"}, {"id": "40055c342c19ab492df04dae2e186cd0d6b5dc5e", "title": "Robust representations for face recognition: the power of averages.", "addresses": [{"address": "University of Glasgow", "lat": "55.87231535", "lng": "-4.28921784", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/a406/ad4bdf50f696191e7472b7a41d9d57ff046c.pdf"}, {"id": "c444c4dab97dd6d6696f56c1cacda051dde60448", "title": "Multiview Face Detection and Registration Requiring Minimal Manual Intervention", "addresses": [{"address": "A*STAR, Singapore", "lat": "1.29889260", "lng": "103.78731070", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37"}, {"id": "235bebe7d0db37e6727dfa1246663be34027d96b", "title": "General Type-2 fuzzy edge detectors applied to face recognition systems", "addresses": [{"address": 
"Tijuana Institute of Technology, Mexico", "lat": "32.87853490", "lng": "-117.23583070", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/NAFIPS.2016.7851625"}, {"id": "65b1760d9b1541241c6c0222cc4ee9df078b593a", "title": "Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf"}, {"id": "27b9e75bcaf9e12127f7181bcb7f1fcb105462c4", "title": "Local frequency descriptor for low-resolution face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2011, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/LEI-LFD-FG-11.pdf"}, {"id": "aecd24f4a41eb6942375b9c03adcb7e137250b3f", "title": "Tensor Sparse Coding for Region Covariances", "addresses": [{"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/aecd/24f4a41eb6942375b9c03adcb7e137250b3f.pdf"}, {"id": "c22df6df55f5c6539e1a4d2e2d50dbaab34007a7", "title": "Compact Binary Patterns (CBP) with Multiple Patch Classifiers for Fast and Accurate Face Recognition", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/c22d/f6df55f5c6539e1a4d2e2d50dbaab34007a7.pdf"}, {"id": "179f446aa297d6fe5c864b605286b946f85bb4ee", "title": "Fusion of static and dynamic body biometrics for gait recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2003, "pdf": "http://lear.inrialpes.fr/people/triggs/events/iccv03/cdrom/iccv03/1449_wang.pdf"}, {"id": "3d741315108b95cdb56d312648f5ad1c002c9718", "title": "Image-based face recognition under illumination and pose variations.", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3d74/1315108b95cdb56d312648f5ad1c002c9718.pdf"}, {"id": "8ca3cfb9595ebc5b36a25659f6bbf362f0b14ae3", "title": "Spectral Clustering Based Null Space Linear Discriminant Analysis (SNLDA)", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/8ca3/cfb9595ebc5b36a25659f6bbf362f0b14ae3.pdf"}, {"id": "908a899c716d63bd327dee4a72061db5674bdc92", "title": "Experiments with Face Recognition Using a Novel Approach Based on CVQ Technique", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/908a/899c716d63bd327dee4a72061db5674bdc92.pdf"}, {"id": "9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73", "title": "3D Shape and Pose Estimation of Face Images Using the Nonlinear Least-Squares Model", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": 
"edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/9f53/83ec6ee5e810679e4a7e0a3f153f0ed3bb73.pdf"}, {"id": "07c90e85ac0f74b977babe245dea0f0abcf177e3", "title": "An Image Preprocessing Algorithm for Illumination Invariant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf"}, {"id": "0faab61c742609be74463d30b0eb1118dba4a4f3", "title": "Null Space Approach of Fisher Discriminant Analysis for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/0faa/b61c742609be74463d30b0eb1118dba4a4f3.pdf"}, {"id": "1319dbeaa28f8a9b19e03a7631e96393e08a07fa", "title": "Gender Recognition Using Fusion of Local and Global Facial Features", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1319/dbeaa28f8a9b19e03a7631e96393e08a07fa.pdf"}, {"id": "48381007b85e8a3b74e5401b2dfc1a5dfc897622", "title": "Sparse Representation and Dictionary Learning for Biometrics and Object Tracking", "addresses": [{"address": "University of Miami", "lat": "25.71733390", "lng": "-80.27866887", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4838/1007b85e8a3b74e5401b2dfc1a5dfc897622.pdf"}, {"id": "852e7df8794b15413f1d71628939c3cc28580b12", "title": "Boosted Audio-Visual HMM for Speech Reading", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/852e/7df8794b15413f1d71628939c3cc28580b12.pdf"}, {"id": "c5c1575565e04cd0afc57d7ac7f7a154c573b38f", "title": "Face Refinement through a Gradient Descent Alignment Approach", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/010a/f49ddb10c51b7913c2533910dd28ca39411c.pdf"}, {"id": "da7bbfa905d88834f8929cb69f41a1b683639f4b", "title": "Discriminant analysis with Gabor phase for robust face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "HoHai University", "lat": "32.05765485", "lng": "118.75500040", "type": "edu"}, {"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}], "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752"}, {"id": "546cbbb897022096511f6a71259e3b99c558224d", "title": "PCA vs. 
ICA: A Comparison on the FERET Data Set", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/8a17/e16de6b932ec42e269621e29d99e46591fef.pdf"}, {"id": "6e7afe55d363adf80330116968163c7e9500f53b", "title": "SVD-based projection for face recognition", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": 2007, "pdf": "http://www.cs.nthu.edu.tw/~cchen/Research/2007EitFace.pdf"}, {"id": "2a9946fb626a58d376fb1491ca8bf8fb4f68dcf9", "title": "Enlarge the Training Set Based on Inter-Class Relationship for Face Recognition from One Image per Person", "addresses": [{"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/2a99/46fb626a58d376fb1491ca8bf8fb4f68dcf9.pdf"}, {"id": "38c61c11554135e09a2353afa536d010c7a53cbb", "title": "Learning the Detection of Faces in Natural Images", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/38c6/1c11554135e09a2353afa536d010c7a53cbb.pdf"}, {"id": "6bfb0f8dd1a2c0b44347f09006dc991b8a08559c", "title": "Multiview discriminative learning for age-invariant face recognition", "addresses": [{"address": "Lomonosov Moscow State University", "lat": "55.70229715", "lng": "37.53179777", "type": "edu"}, {"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2013, "pdf": "https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf"}, {"id": "d33b26794ea6d744bba7110d2d4365b752d7246f", "title": "Transfer Feature Representation via Multiple Kernel Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d33b/26794ea6d744bba7110d2d4365b752d7246f.pdf"}, {"id": "55f94957f753e74f6f0170a45dee746c5b013edb", "title": "Face Recognition Using Balanced Pairwise Classifier Training", "addresses": [{"address": "University of Kent", "lat": "51.29753440", "lng": "1.07296165", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/55f9/4957f753e74f6f0170a45dee746c5b013edb.pdf"}, {"id": "6dbe76f51091ca6a626a62846a946ce687c3dbe8", "title": "INCREMENTAL OBJECT MATCHING WITH PROBABILISTIC METHODS Doctoral dissertation", "addresses": [{"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/6dbe/76f51091ca6a626a62846a946ce687c3dbe8.pdf"}, {"id": "314ad104401c78a83cfe8018412b6a2f33340fc6", "title": "Privacy protecting, intelligibility preserving video surveillance", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2016, "pdf": "http://www.eurecom.fr/fr/publication/4966/download/sec-publi-4966.pdf"}, {"id": "08f6ad0a3e75b715852f825d12b6f28883f5ca05", "title": "Face recognition: Some challenges in forensics", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": 
"-84.47791571", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "102cfd088799405d47c824735dc1356e5835dce7", "title": "Learning-based Face Synthesis for Pose-Robust Recognition from Single Image", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/d5d0/d25663ec0ff8099e613d2278f8a673b9729f.pdf"}, {"id": "fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d", "title": "An experimental study on content-based face annotation of photos", "addresses": [{"address": "National Taiwan Normal University", "lat": "25.00823205", "lng": "121.53577153", "type": "edu"}], "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084"}, {"id": "2910fcd11fafee3f9339387929221f4fc1160973", "title": "Evaluating Open-Universe Face Identification on the Web", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf"}, {"id": "e0ea8ef91bd0a35aec31c9a493137163b4f042b6", "title": "Sparse representation with nearest subspaces for face recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/e0ea/8ef91bd0a35aec31c9a493137163b4f042b6.pdf"}, {"id": "29639a071f67a6867000b53bcb97b37b3d090319", "title": "Gait Identification Considering Body Tilt by Walking Direction Changes", "addresses": [{"address": "Osaka University", "lat": "34.80809035", "lng": "135.45785218", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/2963/9a071f67a6867000b53bcb97b37b3d090319.pdf"}, {"id": "b161d261fabb507803a9e5834571d56a3b87d147", "title": "Gender recognition from face images using a geometric descriptor", "addresses": [{"address": "University of Campinas (UNICAMP)", "lat": "-22.81483740", "lng": "-47.06477080", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913"}, {"id": "ede16b198b83d04b52dc3f0dafc11fd82c5abac4", "title": "LBP edge-mapped descriptor using MGM interest points for face recognition", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343"}, {"id": "5539c0bee8fcf825e63a1abaa950615ebd9c6b49", "title": "Car Detection and Recognition Based on Rear View and Back Light Features", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2014, "pdf": 
"http://pdfs.semanticscholar.org/5539/c0bee8fcf825e63a1abaa950615ebd9c6b49.pdf"}, {"id": "691463f3f7acb0502e21b40958c1ecdee16d1fe0", "title": "Adaptive Markov Random Fields for Example-Based Super-resolution of Faces", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/eb46/25ad9143196021c3def560d025d346c46909.pdf"}, {"id": "0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112", "title": "Patch-based models for visual object classes", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf"}, {"id": "82d5e927c4f1429c07552bfc7bebd5f0e3f2f444", "title": "Histogram Sequence of Local Gabor Binary Pattern for Face Description and Identification", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/82d5/e927c4f1429c07552bfc7bebd5f0e3f2f444.pdf"}, {"id": "e9a8a88b47d0bc20579f39eba1c380b07edc244f", "title": "Effects of the Facial and Racial Features on Gender Classification", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/e9a8/a88b47d0bc20579f39eba1c380b07edc244f.pdf"}, {"id": "982fcead58be419e4f34df6e806204674a4bc579", "title": "Performance improvement of face recognition algorithms using occluded-region detection", "addresses": [{"address": "Azbil Corporation, Kawana, Japan", "lat": "35.33414870", "lng": "139.49433560", "type": "company"}, {"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012"}, {"id": "61f4429c085e8a93c4d7bdb9bff6fac38e58e5c6", "title": "Discriminant Neighborhood Structure Embedding Using Trace Ratio Criterion for Image Recognition", "addresses": [{"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/61f4/429c085e8a93c4d7bdb9bff6fac38e58e5c6.pdf"}, {"id": "ec645bbc34d3ed264516df8b1add4d0cd6c35631", "title": "An improved Bayesian face recognition algorithm in PCA subspace", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/ec64/5bbc34d3ed264516df8b1add4d0cd6c35631.pdf"}, {"id": "3356074f4896bf2af7f46749fdc212a99d4932a6", "title": "Learning Low-Rank Class-Specific Dictionary and Sparse Intra-Class Variant Dictionary for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/3356/074f4896bf2af7f46749fdc212a99d4932a6.pdf"}, {"id": "7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a", "title": "Multi-subregion based probabilistic approach toward pose-invariant face recognition", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/7f1f/3d7b1a4e7fc895b77cb23b1119a6f13e4d3a.pdf"}, {"id": 
"1ab19e516b318ed6ab64822efe9b2328836107a4", "title": "Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation", "addresses": [{"address": "Mando Corp.", "lat": "35.90775700", "lng": "127.76692200", "type": "company"}, {"address": "Samsung", "lat": "37.56653500", "lng": "126.97796920", "type": "company"}, {"address": "Samsung SAIT, Beijing", "lat": "39.90419990", "lng": "116.40739630", "type": "company"}], "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2010.2083674"}, {"id": "e506cdb250eba5e70c5147eb477fbd069714765b", "title": "Heterogeneous Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf"}, {"id": "97dbcc592ed048db545c6e9ed1f27372e8d1d4b8", "title": "Omnidirectional Gait Identification by Tilt Normalization and Azimuth View Transformation", "addresses": [{"address": "Osaka University", "lat": "34.80809035", "lng": "135.45785218", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/97db/cc592ed048db545c6e9ed1f27372e8d1d4b8.pdf"}, {"id": "838ed2aae603dec5851ebf5e4bc64b54db7f34be", "title": "Real-Time Ensemble Based Face Recognition System for Humanoid Robots", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/838e/d2aae603dec5851ebf5e4bc64b54db7f34be.pdf"}, {"id": "50b40ec042047b4292fd9b650969d4efbd20c9ed", "title": "Optimal gradient pursuit for face alignment", "addresses": [{"address": "GE Global Research Center", "lat": "42.82982480", "lng": "-73.87719385", "type": "edu"}], "year": 2011, "pdf": "http://cse.msu.edu/~liuxm/publication/Liu_GradientPursuit_FG2011.pdf"}, {"id": "f6a65be9a3790e8fd3b5116450a47a8e48a54d63", "title": "Parametric Piecewise Linear Subspace Method for Processing Facial Images with 3D Pose Variations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/f6a6/5be9a3790e8fd3b5116450a47a8e48a54d63.pdf"}, {"id": "7b47eb8faaf9c2275cdc70299b850ed649ceec62", "title": "1D-LDA vs. 
2D-LDA: When is vector-based linear discriminant analysis better than matrix-based?", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/7b47/eb8faaf9c2275cdc70299b850ed649ceec62.pdf"}, {"id": "38e7f3fe450b126367ec358be9b4cc04e82fa8c7", "title": "Maximal Likelihood Correspondence Estimation for Face Recognition Across Pose", "addresses": [{"address": "OMRON Corporation, Kyoto, Japan", "lat": "35.01163630", "lng": "135.76802940", "type": "company"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2351265"}, {"id": "ffa23a8c988e57cf5fc21b56b522a4ee68f2f362", "title": "Social game retrieval from unstructured videos", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/ffa2/3a8c988e57cf5fc21b56b522a4ee68f2f362.pdf"}, {"id": "307c5c0a61e318a65bd65af694ce89c275fd7299", "title": "Face Mis-alignment Analysis by Multiple-Instance Subspace", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/307c/5c0a61e318a65bd65af694ce89c275fd7299.pdf"}, {"id": "43b6fb3146cb92bc36a2aab1368d8665af106a87", "title": "ASePPI, an adaptive scrambling enabling privacy protection and intelligibility in H.264/AVC", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.23919/EUSIPCO.2017.8081347"}, {"id": "4b605e6a9362485bfe69950432fa1f896e7d19bf", "title": "A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf"}, {"id": "2cdf5952b5a1bea5d24917aa2f3fc2ee33568e9a", "title": "Autoencoding the retrieval relevance of medical images", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}], "year": 2015, "pdf": "https://arxiv.org/pdf/1507.01251v1.pdf"}, {"id": "2a392cbdb2ac977ad9f969659111e20bd0e9611f", "title": "Supplementary Material for Privacy Preserving Optics for Miniature Vision Sensors", "addresses": [{"address": "University of Florida", "lat": "29.63287840", "lng": "-82.34901330", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/2a39/2cbdb2ac977ad9f969659111e20bd0e9611f.pdf"}, {"id": "55498d89f9eb0c9df9760f5e0e47a15ae7e92f25", "title": "Learning-based face hallucination in DCT domain", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2008, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/264.pdf"}, {"id": "7d61b70d922d20c52a4e629b09465076af71ddfd", "title": "Nonnegative class-specific entropy component analysis with adaptive step search criterion", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}], "year": 2011, "pdf": 
"https://doi.org/10.1007/s10044-011-0258-2"}, {"id": "bde276015ba6677f0ec5fbfc97d5c57daca9d391", "title": "An Evaluation of Face and Ear Biometrics", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/bde2/76015ba6677f0ec5fbfc97d5c57daca9d391.pdf"}, {"id": "856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b", "title": "Image-to-Set Face Recognition Using Locality Repulsion Projections and Sparse Reconstruction-Based Similarity Measure", "addresses": [{"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014"}, {"id": "1a5a79b4937b89420049bc279a7b7f765d143881", "title": "Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "Virginia Commonwealth University", "lat": "37.54821500", "lng": "-77.45306424", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/1a5a/79b4937b89420049bc279a7b7f765d143881.pdf"}, {"id": "759a3b3821d9f0e08e0b0a62c8b693230afc3f8d", "title": "Attribute and simile classifiers for face verification", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2009, "pdf": "http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf"}, {"id": "871e6c1de2e0ba86bad8975b8411ad76a6a9aef9", "title": "Geometric Modeling of 3D-Face Features and Its Applications", "addresses": [{"address": "Indian Institute of Technology Kanpur", "lat": "26.51318800", "lng": "80.23651945", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/871e/6c1de2e0ba86bad8975b8411ad76a6a9aef9.pdf"}, {"id": "8aa85d2f81d7496cf7105ee0a3785f140ddaa367", "title": "Efficient processing of MRFs for unconstrained-pose face recognition", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2013, "pdf": "http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2019/PID2859743.pdf"}, {"id": "674e739709537f0e562b6cf114f15a5cc57fde7e", "title": "Nonsubsampled Contourlet Transform Based Descriptors for Gender Recognition", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2014, "pdf": "http://www.cse.unr.edu/~bebis/CGIV2014.pdf"}, {"id": "062cea54e5d58ee41aea607cbf2ba0cf457aa4e7", "title": "The DIARETDB1 Diabetic Retinopathy Database and Evaluation Protocol", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/062c/ea54e5d58ee41aea607cbf2ba0cf457aa4e7.pdf"}, {"id": "555f75077a02f33a05841f9b63a1388ec5fbcba5", "title": "A Survey on 
Periocular Biometrics Research", "addresses": [{"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1810.03360.pdf"}, {"id": "892db59add66fc581ae1a7338ff8bd6b7aa0f2b4", "title": "FPGA-based Normalization for Modified Gram-Schmidt Orthogonalization", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/892d/b59add66fc581ae1a7338ff8bd6b7aa0f2b4.pdf"}, {"id": "07f31bef7a7035792e3791473b3c58d03928abbf", "title": "Lessons from collecting a million biometric samples", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1016/j.imavis.2016.08.004"}, {"id": "019f1462c1b7101100334e4c421d35feea612492", "title": "Running Head : UNFAMILIAR FACE MATCHING The Effects of External Features and Time Pressure on Unfamiliar Face Matching", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/019f/1462c1b7101100334e4c421d35feea612492.pdf"}, {"id": "10c79df4f44b5e4c08f984f34370d292f31ef309", "title": "Multi-Modal 2D and 3D Biometrics for Face Recognition", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/10c7/9df4f44b5e4c08f984f34370d292f31ef309.pdf"}, {"id": "3514f66f155c271981a734f1523572edcd8fd10e", "title": "A complementary local feature descriptor for face identification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "University of Campinas", "lat": "-27.59539950", "lng": "-48.61542180", "type": "edu"}], "year": 2012, "pdf": "http://www.umiacs.umd.edu/~jhchoi/paper/wacv2012_slide.pdf"}, {"id": "aa4d1ad6fd2dbc05139b8121b500c2b1f6b35bec", "title": "Grassmann Registration Manifolds for Face Recognition", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/aa4d/1ad6fd2dbc05139b8121b500c2b1f6b35bec.pdf"}, {"id": "c79cf7f61441195404472102114bcf079a72138a", "title": "Pose-Invariant 2 D Face Recognition by Matching Using Graphical Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf"}, {"id": "1acf8970598bb2443fd2dd42ceeca1eb3f2fc613", "title": "Boosting Statistical Local Feature Based Classifiers for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2005", "pdf": "https://pdfs.semanticscholar.org/1acf/8970598bb2443fd2dd42ceeca1eb3f2fc613.pdf"}, {"id": "a489a7951c7848ebae5a99ac590c016359a85434", "title": "Attribute-Guided Sketch Generation", "addresses": [{"address": "University of Trento", "lat": "46.06588360", "lng": "11.11598940", "type": "edu"}, {"address": "Huazhong University of Science and Technology", "lat": "30.50975370", "lng": "114.40628810", "type": "edu"}, {"address": 
"University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2019", "pdf": "https://arxiv.org/pdf/1901.09774.pdf"}, {"id": "0b55b31765f101535eac0d50b9da377f82136d2f", "title": "Biometric binary string generation with detection rate optimized bit allocation", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2008, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/163.pdf"}, {"id": "bd7477c250f01f63f438c4f3bebe374caf4b86ba", "title": "Real-time Face and Hand Detection for Videoconferencing on a Mobile Device", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/bd74/77c250f01f63f438c4f3bebe374caf4b86ba.pdf"}, {"id": "9039b8097a78f460db9718bc961fdc7d89784092", "title": "3D Face Recognition Based on Local Shape Patterns and Sparse Representation Classifier", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/9039/b8097a78f460db9718bc961fdc7d89784092.pdf"}, {"id": "ee458bee26e6371f9347b1972bbc9dc26b2f3713", "title": "Stacking-based deep neural network: Deep analytic network on convolutional spectral histogram features", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.01396.pdf"}, {"id": "80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7", "title": "Learning Kernel Extended Dictionary for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TNNLS.2016.2522431"}, {"id": "2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc", "title": "Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf"}, {"id": "778c1e95b6ea4ccf89067b83364036ab08797256", "title": "Exploring Patterns of Gradient Orientations and Magnitudes for Face Recognition", "addresses": [{"address": "VESALIS SAS, France", "lat": "45.75976430", "lng": "3.13102130", "type": "company"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIFS.2012.2224866"}, {"id": "b9504e4a2f40f459b5e83143e77f4972c7888445", "title": "Experimental Analysis of Face Recognition on Still and CCTV Images", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2008, "pdf": "http://conradsanderson.id.au/pdfs/chen_avss_2008.pdf"}, {"id": "1da1299088a6bf28167c58bbd46ca247de41eb3c", "title": "Face identification from a single example image based on Face-Specific Subspace (FSS)", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2002, "pdf": "https://doi.org/10.1109/ICASSP.2002.5745055"}, {"id": "3ca25a9e906b851df01a53f4443d66978a0243b8", "title": "Improved Super-Resolution through Residual Neighbor Embedding", "addresses": 
[{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/3ca2/5a9e906b851df01a53f4443d66978a0243b8.pdf"}, {"id": "64fd48fae4d859583c4a031b51ce76ecb5de614c", "title": "Illuminated face normalization technique by using wavelet fusion and local binary patterns", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}, {"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2008, "pdf": "https://doi.org/10.1109/ICARCV.2008.4795556"}, {"id": "70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e", "title": "Elastic preserving projections based on L1-norm maximization", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-5608-2"}, {"id": "9cda3e56cec21bd8f91f7acfcefc04ac10973966", "title": "Periocular biometrics: databases, algorithms and directions", "addresses": [{"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IWBF.2016.7449688"}, {"id": "569008018f0b9c4abb8b5c662a6710a1fc38b5a6", "title": "Face Similarity Space as Perceived by Humans and Artificial Systems", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/5690/08018f0b9c4abb8b5c662a6710a1fc38b5a6.pdf"}, {"id": "76dff7008d9b8bf44ec5348f294d5518877c6182", "title": "Discrete area filters in accurate detection of faces and facial features", "addresses": [{"address": "Warsaw University of Technology", "lat": "52.22165395", "lng": "21.00735776", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1016/j.imavis.2014.09.004"}, {"id": "4bc2352b087bdc99ef5f00453e5d2272d522524c", "title": "Investigating the Impact of Face Categorization on Recognition Performance", "addresses": [{"address": "University of Nevada", "lat": "39.54694490", "lng": "-119.81346566", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/4bc2/352b087bdc99ef5f00453e5d2272d522524c.pdf"}, {"id": "6250781bb606041fdc1621ba08aee541bfb1285b", "title": "Ear Biometrics Using 2D and 3D Images", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2005, "pdf": "http://www.cse.nd.edu/Reports/2004/TR-2004-31.pdf"}, {"id": "f6fa68847e0ce7fda05a9c73ebcb484f0b42a9af", "title": "Face Recognition Across Pose and Illumination", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/f6fa/68847e0ce7fda05a9c73ebcb484f0b42a9af.pdf"}, {"id": "3a34c622c1af4b181e99d4a58f7870314944d2c4", "title": "D View - Invariant Face Recognition Using a Hierarchical Pose - Normalization Strategy", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3a34/c622c1af4b181e99d4a58f7870314944d2c4.pdf"}, {"id": "ece80165040e9d8304c5dd808a6cdb29c8ecbf5b", "title": "Looking at People Using Partial Least 
Squares", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/a2f6/8e5898364ac7c1d4691d23fab716ad672712.pdf"}, {"id": "ae1de0359f4ed53918824271c888b7b36b8a5d41", "title": "Low-cost Automatic Inpainting for Artifact Suppression in Facial Images", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf"}, {"id": "699be9152895977b0b272887320d543c9c7f6157", "title": "Artistic Illumination Transfer for Portraits", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/699b/e9152895977b0b272887320d543c9c7f6157.pdf"}, {"id": "1e46d0714398904e557f27022908121fa8a7902f", "title": "Baseline Evaluations on the CAS-PEAL-R1 Face Database", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/1e46/d0714398904e557f27022908121fa8a7902f.pdf"}, {"id": "bec31269632c17206deb90cd74367d1e6586f75f", "title": "Large-scale Datasets: Faces with Partial Occlusions and Pose Variations in the Wild", "addresses": [{"address": "Wayne State University", "lat": "42.35775700", "lng": "-83.06286711", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/bec3/1269632c17206deb90cd74367d1e6586f75f.pdf"}, {"id": "e08038b14165536c52ffe950d90d0f43be9c8f15", "title": "Smart Augmentation Learning an Optimal Data Augmentation Strategy", "addresses": [{"address": "National University of Ireland Galway", "lat": "53.27639715", "lng": "-9.05829961", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.08383.pdf"}, {"id": "edf01e1c84e2f80500fd74da69f428617f2a1665", "title": "Gender recognition from faces using bandlet and local binary patterns", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2013, "pdf": "http://www.cse.unr.edu/~bebis/IWSSIP2013.pdf"}, {"id": "6b3e360b80268fda4e37ff39b7f303e3684e8719", "title": "FACE RECOGNITION FROM SKETCHES USING ADVANCED CORRELATION FILTERS USING HYBRID EIGENANALYSIS FOR FACE SYNTHESIS", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2006", "pdf": null}, {"id": "987feaa36f3bb663ac9fa767718c6a90ea0dab3f", "title": "A Distributed System for Supporting Spatio-temporal Analysis on Large-scale Camera Networks", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Stuttgart", "lat": "48.90953380", "lng": "9.18318920", "type": "edu"}, {"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/987f/eaa36f3bb663ac9fa767718c6a90ea0dab3f.pdf"}, {"id": "2feb7c57d51df998aafa6f3017662263a91625b4", "title": "Feature Selection for Intelligent Transportation Systems", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/d344/9eaaf392fd07b676e744410049f4095b4b5c.pdf"}, {"id": "c3558f67b3f4b618e6b53ce844faf38240ee7cd7", "title": 
"Collaboratively Weighting Deep and Classic Representation via $l_2$ Regularization for Image Classification", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}, {"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}, {"address": "Jiangsu University", "lat": "32.20302965", "lng": "119.50968362", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.07589.pdf"}, {"id": "a7678cce6bfca4a34feee5564c87c80fe192a0fd", "title": "The Weakly Identifying System for Doorway Monitoring", "addresses": [{"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/a767/8cce6bfca4a34feee5564c87c80fe192a0fd.pdf"}, {"id": "550289407a642e81e1ef9dc0476117ed7816e9b5", "title": "Conditional Infomax Learning: An Integrated Framework for Feature Extraction and Fusion", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Microsoft Research Asia", "lat": "39.97721700", "lng": "116.33763200", "type": "company"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5502/89407a642e81e1ef9dc0476117ed7816e9b5.pdf"}, {"id": "6577d30abd8bf5b21901572504bd82101a7eed75", "title": "Ear Biometrics in Human", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/6577/d30abd8bf5b21901572504bd82101a7eed75.pdf"}, {"id": "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "title": "Face , Age and Gender Recognition using Local Descriptors", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf"}, {"id": "ac9516a589901f1421e8ce905dd8bc5b689317ca", "title": "A Practical Framework for Executing Complex Queries over Encrypted Multimedia Data", "addresses": [{"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ac95/16a589901f1421e8ce905dd8bc5b689317ca.pdf"}, {"id": "63a584487beb7382cad8ed70020f108ded5bf076", "title": "Face Detection and Modeling for Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2002", "pdf": "https://pdfs.semanticscholar.org/2bb3/4f45b1f0ae2b602a6f25f1966cd0f84e3f5f.pdf"}, {"id": "885c37f94e9edbbb2177cfba8cb1ad840b2a5f20", "title": "Simultaneous Local Binary Feature Learning and Encoding for Homogeneous and Heterogeneous Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255"}, {"id": "279acfde0286bb76dd7717abebc3c8acf12d2c5f", "title": "Local Gradient Order Pattern for Face Representation and Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/ICPR2014/Lei-ICPR-14.pdf"}, {"id": "17f472a7cb25bf1e76ff29181b1d40585e2ae5c1", "title": "Fusing binary templates for 
multi-biometric cryptosystems", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358764"}, {"id": "cf671dc13696d1643cc1f32f7d32c329b16cd745", "title": "Multiple Fisher Classifiers Combination for Face Recognition based on Grouping AdaBoosted Gabor Features", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/cf67/1dc13696d1643cc1f32f7d32c329b16cd745.pdf"}, {"id": "b53485dbdd2dc5e4f3c7cff26bd8707964bb0503", "title": "Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1012-z"}, {"id": "9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03", "title": "A review on Gabor wavelets for face recognition", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2006, "pdf": "https://doi.org/10.1007/s10044-006-0033-y"}, {"id": "d103df0381582003c7a8930b68047b4f26d9b613", "title": "Quality Assessment and Restoration of Face Images in Long Range/High Zoom Video", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/d103/df0381582003c7a8930b68047b4f26d9b613.pdf"}, {"id": "ce0aa94c79f60c35073f434a7fd6987180f81527", "title": "Achieving Anonymity against Major Face Recognition Algorithms", "addresses": [{"address": "Ruhr-University Bochum", "lat": "51.44415765", "lng": "7.26096541", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ce0a/a94c79f60c35073f434a7fd6987180f81527.pdf"}, {"id": "56fb30b24e7277b47d366ca2c491749eee4d6bb1", "title": "Using Bayesian statistics and Gabor Wavelets for recognition of human faces", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICAPR.2015.7050658"}, {"id": "963a004e208ce4bd26fa79a570af61d31651b3c3", "title": "Computational methods for modeling facial aging: A survey", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1016/j.jvlc.2009.01.011"}, {"id": "4a18adc7f5a090a041528a88166671248703f6e0", "title": "Illumination Normalization for Robust Face Recognition Against Varying Lighting Conditions", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/c2c3/ecd39dd24e2b57ae6023536cc1fcd29d184a.pdf"}, {"id": "301662c2a6ed86e48f21c1d24bfc67b403201b0c", "title": "Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}, {"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": 2015, 
"pdf": "http://pdfs.semanticscholar.org/688d/0dddf90995ba6248de148e58030cb8f558e8.pdf"}, {"id": "c48b2582429cc9ae427a264eed469d08b571acde", "title": "Facial Peculiarity Retrieval via Deep Neural Networks Fusion", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c48b/2582429cc9ae427a264eed469d08b571acde.pdf"}, {"id": "878ec66a3bb87f23f3f8fd96ee504f79e6100a95", "title": "THESIS EVALUATING THE PERFORMANCE OF IPHOTO FACIAL RECOGNITION AT THE BIOMETRIC VERIFICATION TASK", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/878e/c66a3bb87f23f3f8fd96ee504f79e6100a95.pdf"}, {"id": "124f6992202777c09169343d191c254592e4428c", "title": "Visual Psychophysics for Making Face Recognition Algorithms More Explainable", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}, {"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.07140.pdf"}, {"id": "4156f9fc5983b09eb97ad3d9abc248b15440b955", "title": "2 Subspace Methods for Face Recognition : Singularity , Regularization , and Robustness", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4156/f9fc5983b09eb97ad3d9abc248b15440b955.pdf"}, {"id": "533d70c914a4b84ec7f35ef6c74bb3acba4c26fc", "title": "Blaming the victims of your own mistakes: How visual search accuracy influences evaluation of stimuli.", "addresses": [{"address": "University of Iceland", "lat": "64.13727400", "lng": "-21.94561454", "type": "edu"}, {"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/533d/70c914a4b84ec7f35ef6c74bb3acba4c26fc.pdf"}, {"id": "ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c", "title": "Symmetrical Two-Dimensional PCA with Image Measures in Face Recognition", "addresses": [{"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ccd7/a6b9f23e983a3fc6a70cc3b9c9673d70bf2c.pdf"}, {"id": "60a006bdfe5b8bf3243404fae8a5f4a9d58fa892", "title": "A reference-based framework for pose invariant face recognition", "addresses": [{"address": "University of North Carolina at Chapel Hill", "lat": "35.91139710", "lng": "-79.05045290", "type": "edu"}], "year": 2015, "pdf": "http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf"}, {"id": "19fed85436eff43e60b9476e3d8742dfedba6384", "title": "A Novel Multiple Kernel Sparse Representation based Classification for Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/19fe/d85436eff43e60b9476e3d8742dfedba6384.pdf"}, {"id": "244c5f88186475bc3b051be8ebb6422e4b8de707", "title": "Video from nearly still: An application to low frame-rate gait recognition", "addresses": [{"address": "Osaka University", "lat": "34.80809035", "lng": "135.45785218", "type": "edu"}], "year": 2012, "pdf": "http://www.am.sanken.osaka-u.ac.jp/~mansur/files/cvpr2012.pdf"}, {"id": 
"fc798314994bf94d1cde8d615ba4d5e61b6268b6", "title": "Face Recognition : face in video , age invariance , and facial marks", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf"}, {"id": "977bedd692c240c162481ef769b31e0f5455469a", "title": "A Two-Step Approach to Hallucinating Faces: Global Parametric Model and Local Nonparametric Model", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/977b/edd692c240c162481ef769b31e0f5455469a.pdf"}, {"id": "9e1c3c7f1dce662a877727a821bdf41c5cd906bb", "title": "Learning Disentangling and Fusing Networks for Face Completion Under Structured Occlusions", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9e1c/3c7f1dce662a877727a821bdf41c5cd906bb.pdf"}, {"id": "4308f53244bbb6a1e22ba1d39e079e5065a51364", "title": "Ethnicity Identification from Face Images", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/4308/f53244bbb6a1e22ba1d39e079e5065a51364.pdf"}, {"id": "5b1f3a60518c3a552de09ed51646764551f4cb84", "title": "Multiple cue integration in transductive confidence machines for head pose classification", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2008, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/121.pdf"}, {"id": "a96a7a381872ae40179ded0d79f905da0455d9d1", "title": "Segmentation of Saimaa Ringed Seals for Identification Purposes", "addresses": [{"address": "Monash University Malaysia", "lat": "3.06405715", "lng": "101.60059740", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/a96a/7a381872ae40179ded0d79f905da0455d9d1.pdf"}, {"id": "9a7fcd09afd8c3ae227e621795168c94ffbac71d", "title": "Action unit recognition transfer across datasets", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2011, "pdf": "http://mplab.ucsd.edu/wp-content/uploads/2011-WuEtAl-FERA-DatasetTransfer.pdf"}, {"id": "f2d813a987f0aed5056d5eccbadee8738bbd0a4b", "title": "Fast Matching by 2 Lines of Code for Large Scale Face Recognition Systems", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f2d8/13a987f0aed5056d5eccbadee8738bbd0a4b.pdf"}, {"id": "8489236bbbb3298f4513c7e005a85ba7a48cc946", "title": "Vision and Touch for Grasping", "addresses": [{"address": "Ruhr-University Bochum", "lat": "51.44415765", "lng": "7.26096541", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/8489/236bbbb3298f4513c7e005a85ba7a48cc946.pdf"}, {"id": "1dede3e0f2e0ed2984aca8cd98631b43c3f887b9", "title": "A vote of confidence based interest point detector", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2013, "pdf": "http://www3.ntu.edu.sg/home/EXDJiang/ICASSP13-3.pdf"}, {"id": "4c56f119ebf7c71f2a83e4d79e8d88314b8e6044", "title": "An 
other-race effect for face recognition algorithms", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2011, "pdf": "http://www.nist.gov/customcf/get_pdf.cfm?pub_id=906254"}, {"id": "1b67053c682dcbc9dc368de89fff32f787320a96", "title": "Quality-Driven Face Occlusion Detection and Recovery", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2007, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2007/CVPR07_face01.pdf"}, {"id": "86274e426bfe962d5cb994d5d9c6829f64410c32", "title": "Face Recognition in Different Subspaces: A Comparative Study", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/8627/4e426bfe962d5cb994d5d9c6829f64410c32.pdf"}, {"id": "4c170a0dcc8de75587dae21ca508dab2f9343974", "title": "FaceTracer: A Search Engine for Large Collections of Images with Faces", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf"}, {"id": "4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99", "title": "Face Recognition From Video", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/4276/eb27e2e4fc3e0ceb769eca75e3c73b7f2e99.pdf"}, {"id": "63f9f3f0e1daede934d6dde1a84fb7994f8929f0", "title": "Local Gabor binary pattern histogram sequence (LGBPHS): a novel non-statistical model for face representation and recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2005, "pdf": "http://www.jdl.ac.cn/user/sgshan/pub/ICCV2005-ZhangShan-LGBP.pdf"}, {"id": "39e1fb5539737a17ae5fc25de30377dfaecfa100", "title": "Appearance-based face recognition and light-fields", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2004, "pdf": "https://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2004_1/gross_ralph_2004_1.pdf"}, {"id": "e19ba2a6ce70fb94d31bb0b39387aa734e6860b0", "title": "A Different Approach to Appearance \u2013based Statistical Method for Face Recognition Using Median", "addresses": [{"address": "Anna University", "lat": "13.01058380", "lng": "80.23537360", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/e19b/a2a6ce70fb94d31bb0b39387aa734e6860b0.pdf"}, {"id": "528a6698911ff30aa648af4d0a5cf0dd9ee90b5c", "title": "Is All Face Processing Holistic ? 
The View from UCSD", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Iowa", "lat": "41.66590000", "lng": "-91.57310307", "type": "edu"}], "year": "2003", "pdf": "https://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf"}, {"id": "bdc3546ceee0c2bda9debff7de9aa7d53a03fe7d", "title": "Modeling distance functions induced by face recognition algorithms", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/bdc3/546ceee0c2bda9debff7de9aa7d53a03fe7d.pdf"}, {"id": "4c5566d4cb47f4db45d46c6aaf324d6057b580bc", "title": "Gender recognition from face images with trainable COSFIRE filters", "addresses": [{"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738068"}, {"id": "328bfd1d0229bc4973277f893abd1eb288159fc9", "title": "A review of the literature on the aging adult skull and face: implications for forensic science research and applications.", "addresses": [{"address": "University of North Carolina at Wilmington", "lat": "34.22498270", "lng": "-77.86907744", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/328b/fd1d0229bc4973277f893abd1eb288159fc9.pdf"}, {"id": "462fe97ce53e58c8e2cb01c925b46bcf3bb53eda", "title": "How features of the human face affect recognition: a statistical comparison of three face recognition algorithms", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}], "year": 2004, "pdf": "http://www.cs.colostate.edu/~draper/papers/givens_cvpr04.pdf"}, {"id": "90ea3a35e946af97372c3f32a170b179fe8352aa", "title": "Discriminant Learning for Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/90ea/3a35e946af97372c3f32a170b179fe8352aa.pdf"}, {"id": "c95e379aab32a1611f1f549fd11a3e9498ab5dae", "title": "Constructing Benchmark Databases and Protocols for Medical Image Analysis: Diabetic Retinopathy", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}, {"address": "University of Tampere", "lat": "61.49412325", "lng": "23.77920678", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c95e/379aab32a1611f1f549fd11a3e9498ab5dae.pdf"}, {"id": "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "title": "Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches", "addresses": [{"address": "Aberystwyth University", "lat": "52.41073580", "lng": "-4.05295501", "type": "edu"}, {"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf"}, {"id": "5bb9540375ba9bba22f8a22ba2990cfe7ff6780c", "title": "Discriminant Analysis of Haar Features for Accurate Eye Detection", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/5bb9/540375ba9bba22f8a22ba2990cfe7ff6780c.pdf"}, {"id": 
"916498961a51f56a592c3551b0acc25978571fa7", "title": "Optimal landmark detection using shape models and branch and bound", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126275"}, {"id": "01a19d3e902d7431f533f5f0b54510a7fb9bda23", "title": "A Practical Face Relighting Method for Directional Lighting Normalization", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3521/15bbb399b94865a7d870d1cd1a79e42104b8.pdf"}, {"id": "022f38febc47818a010dc64ca54f6e137055cc88", "title": "3D face texture modeling from uncalibrated frontal and profile images", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2012, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/HanJain_3DFaceTextureModeling_UncalibratedFrontalProfileImages_BTAS12.pdf"}, {"id": "b13014374863715c421ed92d3827fc7e09a3e47a", "title": "Rapid Correspondence Finding in Networks of Cortical Columns", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/fe31/8312fd51fc65d132084c3862c85f067e6edf.pdf"}, {"id": "8a55c385c8cf76cadaa28c7ab1fde9dc28577b08", "title": "Positive definite dictionary learning for region covariances", "addresses": [{"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": 2011, "pdf": "http://www-users.cs.umn.edu/~boley/publications/papers/ICCV2011.pdf"}, {"id": "d40cd10f0f3e64fd9b0c2728089e10e72bea9616", "title": "Enhancing Face Identification Using Local Binary Patterns and K-Nearest Neighbors", "addresses": [{"address": "Hangzhou Dianzi University", "lat": "30.31255250", "lng": "120.34309460", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d40c/d10f0f3e64fd9b0c2728089e10e72bea9616.pdf"}, {"id": "ba9e967208976f24a09730af94086e7ae0417067", "title": "An Open Source Framework for Standardized Comparisons of Face Recognition Algorithms", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f369/03d22a463876b895bbe37b5f9ad235a38edd.pdf"}, {"id": "4d527974512083712c9adf26a923b44d7e426b44", "title": "Impact of Image Quality on Performance: Comparison of Young and Elderly Fingerprints", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/4d52/7974512083712c9adf26a923b44d7e426b44.pdf"}, {"id": "e96ce25d11296fce4e2ecc2da03bd207dc118724", "title": "Classification of face images using local iterated function systems", "addresses": [{"address": "Deakin University", "lat": "-38.19928505", "lng": "144.30365229", "type": "edu"}], "year": 2007, "pdf": "https://doi.org/10.1007/s00138-007-0095-x"}, {"id": "fcd2fb1ada96218dcc2547efa040e76416cc7066", "title": "Perceptual data mining: bootstrapping visual intelligence from tracking behavior", "addresses": [{"address": "Northwestern University", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu"}, {"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2002, "pdf": 
"http://pdfs.semanticscholar.org/fcd2/fb1ada96218dcc2547efa040e76416cc7066.pdf"}, {"id": "97930609f1a5066fd437ed8a4e57abbfb1ae4b12", "title": "Best Practices in Testing and Reporting Performance of Biometric Devices", "addresses": [{"address": "San Jose State University", "lat": "37.33519080", "lng": "-121.88126008", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/bef4/03c136beaa6fd43fc3184d4666512daaf9e5.pdf"}, {"id": "985dc9b8b003483f6df363a8ce07dd8c89ced903", "title": "3D Morphable Face Model, a Unified Approach for Analysis and Synthesis of Images", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/985d/c9b8b003483f6df363a8ce07dd8c89ced903.pdf"}, {"id": "1057137d8ebbbfc4e816d74edd7ab04f61a893f8", "title": "Craniofacial Aging", "addresses": [{"address": "University of North Carolina Wilmington", "lat": "34.23755810", "lng": "-77.92701290", "type": "edu"}, {"address": "Virginia Commonwealth University", "lat": "37.54821500", "lng": "-77.45306424", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/1057/137d8ebbbfc4e816d74edd7ab04f61a893f8.pdf"}, {"id": "005d818ff8517669d62ba7b536e76b56698fa135", "title": "Neural Network-Based Face Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 1996, "pdf": "http://pdfs.semanticscholar.org/4d7e/e94f164cce28a8bfef4417e9a99265b02b54.pdf"}, {"id": "0c85d1b384bb6e2d5d6e4db5461a7101ceed6808", "title": "Engineering Privacy in Public: Confounding Face Recognition", "addresses": [{"address": "University of Pennsylvania", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/0ff8/d39a962ed902e1c995815ade265ea903d218.pdf"}, {"id": "9107543d9a9d915c92fe4139932c5d818cfc187d", "title": "Investigation of New Techniques for Face Detection", "addresses": [{"address": "Virginia Polytechnic Institute and State University", "lat": "37.21872455", "lng": "-80.42542519", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/9107/543d9a9d915c92fe4139932c5d818cfc187d.pdf"}, {"id": "b3e856729f89b082b4108561479ff09394bb6553", "title": "Pose Robust Video - Based Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/b3e8/56729f89b082b4108561479ff09394bb6553.pdf"}, {"id": "00d6e5a1b347463f6aeb08a10cd912273c9d1347", "title": "Face Recognition Vendor Test 2002 : Evaluation Report", "addresses": [{"address": "DARPA", "lat": "38.88334130", "lng": "-77.10459770", "type": "mil"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/00d6/e5a1b347463f6aeb08a10cd912273c9d1347.pdf"}, {"id": "2fd1c99edbb3d22cec4adc9ba9319cfc2360e903", "title": "Rotation Invariant Neural Network-Based Face Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/98c8/ca05ed5baff5b217c571ab5c5a0ee0706e27.pdf"}, {"id": "b6145d3268032da70edc9cfececa1f9ffa4e3f11", "title": "Face Recognition Using the Discrete Cosine Transform", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": 
"-73.57496870", "type": "edu"}], "year": 2001, "pdf": "http://cnl.salk.edu/~zhafed/papers/fr_IJCV_2001.pdf"}, {"id": "01b73cfd803f0bdeab8bbfc26cd1ed110c762c91", "title": "Facial Recognition Technology A Survey of Policy and Implementation Issues", "addresses": [{"address": "Lancaster University", "lat": "54.00975365", "lng": "-2.78757491", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/01b7/3cfd803f0bdeab8bbfc26cd1ed110c762c91.pdf"}, {"id": "c9579768d142a7020d095090183805c98a2f78e5", "title": "The Bochum/USC Face Recognition System and How it Fared in the FERET Phase III Test", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/e30d/b2331efa48f6c60330d492210ed6395774f2.pdf"}, {"id": "42fe5666599f35b805657e829e8f9093ee95b908", "title": "Pose-Tolerant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/42fe/5666599f35b805657e829e8f9093ee95b908.pdf"}, {"id": "29c7dfbbba7a74e9aafb6a6919629b0a7f576530", "title": "Automatic Facial Expression Analysis and Emotional Classification", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/29c7/dfbbba7a74e9aafb6a6919629b0a7f576530.pdf"}, {"id": "07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1", "title": "Large scale unconstrained open set face database", "addresses": [{"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2013, "pdf": "http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf"}, {"id": "f7f19ac1c4e38c104045c306f5ddac6329193d8c", "title": "Measuring External Face Appearance for Face Classification", "addresses": [{"address": "Universitat Aut\u00f2noma de Barcelona", "lat": "41.50078110", "lng": "2.11143663", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/f7f1/9ac1c4e38c104045c306f5ddac6329193d8c.pdf"}, {"id": "57bd46b16644be40b2e0dc595c1aaa6abbadba89", "title": "Overview of Work in Empirical Evaluation of Computer Vision Algorithms", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/c3f7/6fe32a0ca448f1ce7004198827df48bf827b.pdf"}, {"id": "fc83a26beb38b17af737c4ff34141d0deea3a4e1", "title": "The Challenges of the Environment and the Human / Biometric Device Interaction on Biometric System Performance", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/fc83/a26beb38b17af737c4ff34141d0deea3a4e1.pdf"}, {"id": "5ea51401eea9a50a16bd17471bfd559d2d989760", "title": "Robust Face Alignment Based on Hierarchical Classifier Network", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5ea5/1401eea9a50a16bd17471bfd559d2d989760.pdf"}, {"id": "71644fab2275cfd6a8f770a26aba4e6228e85dec", "title": "Multi-View Discriminant Analysis", "addresses": [{"address": "Chinese Academy of Sciences", "lat": 
"40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://www.jdl.ac.cn/doc/2011/20131910365517756_2012_eccv_mnkan_mvda.pdf"}, {"id": "280bc9751593897091015aaf2cab39805768b463", "title": "Gender Perception From Faces Using Boosted LBPH (Local Binary Patten Histograms)", "addresses": [{"address": "COMSATS Institute of Information Technology, Lahore", "lat": "31.40063320", "lng": "74.21372960", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf"}, {"id": "23b80dc704e25cf52b5a14935002fc083ce9c317", "title": "Learning Generative Models via Discriminative Approaches", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383035"}, {"id": "857ad04fca2740b016f0066b152bd1fa1171483f", "title": "Sample Images can be Independently Restored from Face Recognition Templates", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/857a/d04fca2740b016f0066b152bd1fa1171483f.pdf"}, {"id": "87b81c8821a2cb9cdf26c75c1531717cab4b942f", "title": "Face Detection with Facial Features and Gender Classification Based On Support Vector Machine", "addresses": [{"address": "Manonmaniam Sundaranar University", "lat": "8.76554685", "lng": "77.65100445", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/87b8/1c8821a2cb9cdf26c75c1531717cab4b942f.pdf"}, {"id": "099ce5cb6f42bff5ad117852d62c5a07e6407b8a", "title": "Spectral Methods for Multi-Scale Feature Extraction and Data Clustering", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/099c/e5cb6f42bff5ad117852d62c5a07e6407b8a.pdf"}, {"id": "21358489b5ce0e94ff37792a8a5eea198e7272f3", "title": "Face Inpainting with Local Linear Representations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/c0cc/2073cad539d979fc6f860177b531b45fafc1.pdf"}, {"id": "dc4e4b9c507e8be2d832faf64e5a2e8887115265", "title": "Face Retrieval Based on Robust Local Features and Statistical-Structural Learning Approach", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": "2008", "pdf": "https://pdfs.semanticscholar.org/dc4e/4b9c507e8be2d832faf64e5a2e8887115265.pdf"}, {"id": "891d435fd1a070bb66225abfd62b2e2c5350e87c", "title": "Selective Feature Generation Method for Classification of Low-dimensional Data", "addresses": [{"address": "Dankook University", "lat": "37.32195750", "lng": "127.12507230", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/891d/435fd1a070bb66225abfd62b2e2c5350e87c.pdf"}, {"id": "854b1f0581f5d3340f15eb79452363cbf38c04c8", "title": "Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}], "year": "2017", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648"}, {"id": "cbb55f5885f9a0d0bfaa2c0bf5293ef45a04c5cd", "title": "Performance Characterisation of Face Recognition Algorithms and Their Sensitivity to Severe Illumination Changes", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/cbb5/5f5885f9a0d0bfaa2c0bf5293ef45a04c5cd.pdf"}, {"id": "d8896861126b7fd5d2ceb6fed8505a6dff83414f", "title": "In-plane Rotational Alignment of Faces by Eye and Eye-pair Detection", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf"}, {"id": "e1d1540a718bb7a933e21339f1a2d90660af7353", "title": "Discriminative Probabilistic Latent Semantic Analysis with Application to Single Sample Face Recognition", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11063-018-9852-2"}, {"id": "f12813073a7f894f82fe2b166893424edba7dc79", "title": "Unified Principal Component Analysis with generalized Covariance Matrix for face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587375"}, {"id": "946c2036c940e77260ade031ba413ec9f2435985", "title": "PCA for Gender Estimation: Which Eigenvectors Contribute?", "addresses": [{"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/946c/2036c940e77260ade031ba413ec9f2435985.pdf"}, {"id": "a129c30b176820bf7f4756b4b4efc92d2a83f190", "title": "Older adults' associative memory is modified by manner of presentation at encoding and retrieval.", "addresses": [{"address": "Elon University", "lat": "36.10179560", "lng": "-79.50173300", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a129/c30b176820bf7f4756b4b4efc92d2a83f190.pdf"}, {"id": "e1fac9e9427499d3758213daf1c781b9a42a3420", "title": "Face Image Retrieval Based on Probe Sketch Using SIFT Feature Descriptors", "addresses": [{"address": "National Institute of Technology, Karnataka", "lat": "13.01119095", "lng": "74.79498825", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/7c90/60a809bd28ef61421588f48e33f6eae6ddfd.pdf"}, {"id": "7735f63e5790006cb3d989c8c19910e40200abfc", "title": "Multispectral Imaging For Face Recognition Over Varying Illumination", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/7735/f63e5790006cb3d989c8c19910e40200abfc.pdf"}, {"id": "f909d04c809013b930bafca12c0f9a8192df9d92", "title": "Single Image Subspace for Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/f909/d04c809013b930bafca12c0f9a8192df9d92.pdf"}, {"id": "33abfe693258a4e00467494b11ee4d523379ab6b", 
"title": "Local Discriminant Embedding with Tensor Representation", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2006, "pdf": "http://www.cse.ust.hk/~dyyeung/paper/pdf/yeung.icip2006a.pdf"}, {"id": "a1997d89f544cc862c63a972ef364b2ff38982e9", "title": "Can SNOMED CT Changes Be Used as a Surrogate Standard for Evaluating the Performance of Its Auditing Methods?", "addresses": [{"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a199/7d89f544cc862c63a972ef364b2ff38982e9.pdf"}, {"id": "aeb64f88302b9d4d23ee13ece5c9842dd43dc37f", "title": "Recollection and confidence in two-alternative forced choice episodic recognition", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/aeb6/4f88302b9d4d23ee13ece5c9842dd43dc37f.pdf"}, {"id": "e392816ec3e0b131bbab06431ac85b14afa7d656", "title": "A Simple and Efficient Supervised Method for Spatially Weighted PCA in Face Image Analysis", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/e392/816ec3e0b131bbab06431ac85b14afa7d656.pdf"}, {"id": "3e76496aa3840bca2974d6d087bfa4267a390768", "title": "Dictionary Learning in Optimal Metric Subspace", "addresses": [{"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}, {"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3e76/496aa3840bca2974d6d087bfa4267a390768.pdf"}, {"id": "355af3c3adbb17d25f0d2a4193e3daadffc0d4e8", "title": "Pattern recognition: Historical perspective and future directions", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/355a/f3c3adbb17d25f0d2a4193e3daadffc0d4e8.pdf"}, {"id": "1e6d1e811da743df02481bca1a7bdaa73b809913", "title": "Multimodal person recognition for human-vehicle interaction", "addresses": [{"address": "Sabanci University", "lat": "40.89271590", "lng": "29.37863323", "type": "edu"}], "year": 2006, "pdf": "http://research.sabanciuniv.edu/608/1/3011800001159.pdf"}, {"id": "f4aafb50c93c5ad3e5c4696ed24b063a1932915a", "title": "What would you look like in Springfield? 
Linear Transformations between High-Dimensional Spaces", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/f4aa/fb50c93c5ad3e5c4696ed24b063a1932915a.pdf"}, {"id": "10156890bc53cb6be97bd144a68fde693bf13612", "title": "Face Recognition Using Sparse Representation-Based Classification on K-Nearest Subspace", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1015/6890bc53cb6be97bd144a68fde693bf13612.pdf"}, {"id": "16820ccfb626dcdc893cc7735784aed9f63cbb70", "title": "Real-time embedded age and gender classification in unconstrained video", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf"}, {"id": "966b76acfa75253679b1a82ecc5a68e523f5c0c9", "title": "Preference suppression caused by misattribution of task-irrelevant subliminal motion.", "addresses": [{"address": "Boston University", "lat": "42.35042530", "lng": "-71.10056114", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f204/2494d5666e436f5e96ff5e0cd3b5f5e5485b.pdf"}, {"id": "0c7f27d23a162d4f3896325d147f412c40160b52", "title": "Models and Algorithms for Vision through the Atmosphere", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/0c7f/27d23a162d4f3896325d147f412c40160b52.pdf"}, {"id": "6069b4bc1a21341b77b49f01341c238c770d52e0", "title": "Comparing Kernel-based Learning Methods for Face Recognition", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/b02b/50ed995fe526208b1577b9d7ef6262bf3ecf.pdf"}, {"id": "af31ef1e81c1132f186d7aebb141d7f59a815010", "title": "Domain-specific progressive sampling of face images", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "London, United Kingdom", "lat": "51.50732190", "lng": "-0.12764740", "type": "edu"}], "year": 2013, "pdf": "http://cas.ee.ic.ac.uk/people/ccb98/papers/LiuGlobalSIP13.pdf"}, {"id": "d275714c323dd4e400e8003fa8c33070f8ea03d1", "title": "White Fear, Dehumanization and Low Empathy: a Lethal Combination for Shooting Biases by Yara Mekawi", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/d275/714c323dd4e400e8003fa8c33070f8ea03d1.pdf"}, {"id": "88ee6d0b8342852a5bd55864dc7a1c8452c10bbf", "title": "Support Vector Machines Applied to Face Recognition", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/88ee/6d0b8342852a5bd55864dc7a1c8452c10bbf.pdf"}, {"id": "59f83e94a7f52cbb728d434426f6fe85f756259c", "title": "An Improved Illumination Normalization Approach based on Wavelet Tranform for Face Recognition from Single Training Image Per Person", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], 
"year": "2010", "pdf": "https://pdfs.semanticscholar.org/59f8/3e94a7f52cbb728d434426f6fe85f756259c.pdf"}, {"id": "5d1c4e93e32ee686234c5aae7f38025523993c8c", "title": "Towards Pose Robust Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d539.pdf"}, {"id": "53ce84598052308b86ba79d873082853022aa7e9", "title": "Optimized Method for Real-Time Face Recognition System Based on PCA and Multiclass Support Vector Machine", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf"}, {"id": "eef05b87f1a62bf658fc622427187eab4fb0f7a5", "title": "High Performance Human Face Recognition using Independent High Intensity Gabor Wavelet Responses: A Statistical Approach", "addresses": [{"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/eef0/5b87f1a62bf658fc622427187eab4fb0f7a5.pdf"}, {"id": "f5c285c3729188884f448db3cc60647f15e289d3", "title": "Sorted Index Numbers for Privacy Preserving Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/f5c2/85c3729188884f448db3cc60647f15e289d3.pdf"}, {"id": "45a3ba54fc2210cf8a4fba0cbdce9dad3cefc826", "title": "Complete Cross-Validation for Nearest Neighbor Classifiers", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/45a3/ba54fc2210cf8a4fba0cbdce9dad3cefc826.pdf"}, {"id": "71e942e05f73b163a7ec814a85ff4131cb48f650", "title": "The BANCA Database and Evaluation Protocol", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/8f83/e1a0c05da3a2f316b75b4a178fadf709dd68.pdf"}, {"id": "1fe0c5562c8dffecc0cadeef2c592bfa6e89b5ca", "title": "Illumination invariant face recognition based on neural network ensemble", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "North Dakota State University", "lat": "46.89715500", "lng": "-96.81827603", "type": "edu"}], "year": 2004, "pdf": "http://cs.boisestate.edu/~dxu/publications/ICTAI04.pdf"}, {"id": "58da4e59c4d259196fc6bd807bc8c36636efa4ef", "title": "Symmetrical PCA in face recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/58da/4e59c4d259196fc6bd807bc8c36636efa4ef.pdf"}, {"id": "c901524f01c7a0db3bb01afa1d5828913c84628a", "title": "Image Region Selection and Ensemble for Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2006, "pdf": "https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/jcst06.pdf"}, {"id": "221c9fff1c25368a6b72ca679c67a3d6b35e2c00", "title": "Memory-Based Face Recognition for Visitor Identification", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 
2000, "pdf": "http://pdfs.semanticscholar.org/5ccb/f66733438ab42fe2da66ad1d37635f4391de.pdf"}, {"id": "a3bc6020cd57ebe3a82a0b232f969bcc4e372e53", "title": "A Hybrid Feature Extraction Technique for Face Recognition", "addresses": [{"address": "University of Wollongong", "lat": "-34.40505545", "lng": "150.87834655", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a3bc/6020cd57ebe3a82a0b232f969bcc4e372e53.pdf"}, {"id": "13d591220f9fdb22d81c2438a008c80843b61fd4", "title": "Boosting Multi-gabor Subspaces for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/13d5/91220f9fdb22d81c2438a008c80843b61fd4.pdf"}, {"id": "621e8882c41cdaf03a2c4a986a6404f0272ba511", "title": "On robust biometric identity verification via sparse encoding of faces: Holistic vs local approaches", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/IJCNN.2012.6252611"}, {"id": "8780f14d04671d4f2ed50307d16062d72cc51863", "title": "Likelihood Ratio-Based Detection of Facial Features", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/8780/f14d04671d4f2ed50307d16062d72cc51863.pdf"}, {"id": "7a52eb0886892c04c6c80b78795d880a70796cb6", "title": "Perceptual distance normalization for appearance detection", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://www.cs.toronto.edu/~jepson/papers/ChennubhotlaJepsonICPR2004.pdf"}, {"id": "1fe121925668743762ce9f6e157081e087171f4c", "title": "Unsupervised learning of overcomplete face descriptors", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2015, "pdf": "https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf"}, {"id": "f3cb97791ded4a5c3bed717f820215a1c9648226", "title": "Multi-scale Block Weber Local Descriptor for Face Recognition", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f3cb/97791ded4a5c3bed717f820215a1c9648226.pdf"}, {"id": "d28d697b578867500632b35b1b19d3d76698f4a9", "title": "Face Recognition Using Shape and Texture", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/d28d/697b578867500632b35b1b19d3d76698f4a9.pdf"}, {"id": "5a5ae31263517355d15b7b09d74cb03e40093046", "title": "Super Resolution and Face Recognition Based People Activity Monitoring Enhancement Using Surveillance Camera", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5a5a/e31263517355d15b7b09d74cb03e40093046.pdf"}, {"id": "82524c49ea20390c711e0606e50570ac2183c281", "title": "(2D)PCA: 2-Directional 2-Dimensional PCA for Efficient Face Representation and Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", 
"lng": "118.77408833", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/8252/4c49ea20390c711e0606e50570ac2183c281.pdf"}, {"id": "b13a882e6168afc4058fe14cc075c7e41434f43e", "title": "Recognition of Humans and Their Activities Using Video", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf"}, {"id": "cd0503a31a9f9040736ccfb24086dc934508cfc7", "title": "Maximizing Resource Utilization In Video Streaming Systems", "addresses": [{"address": "Wayne State University", "lat": "42.35775700", "lng": "-83.06286711", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cd05/03a31a9f9040736ccfb24086dc934508cfc7.pdf"}, {"id": "183c10b7d9ff26576e13a6639de0f7af206ed058", "title": "Face recognition based on frontal views generated from non-frontal images", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2005, "pdf": "http://gravis.cs.unibas.ch/publications/CVPR05_Blanz.pdf"}, {"id": "96d34c1a749e74af0050004162d9dc5132098a79", "title": "High-speed face recognition based on discrete cosine transform and RBF neural networks", "addresses": [{"address": "Nanyang Technological University, Singapore", "lat": "1.34619520", "lng": "103.68154990", "type": "edu"}], "year": 2005, "pdf": "https://doi.org/10.1109/TNN.2005.844909"}, {"id": "559645d2447004355c83737a19c9a811b45780f1", "title": "Combining view-based pose normalization and feature transform for cross-pose face recognition", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}, {"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}, {"address": "\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne", "lat": "46.51841210", "lng": "6.56846540", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139114"}, {"id": "bc866c2ced533252f29cf2111dd71a6d1724bd49", "title": "A Multi-Modal Face Recognition Method Using Complete Local Derivative Patterns and Depth Maps", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/bc86/6c2ced533252f29cf2111dd71a6d1724bd49.pdf"}, {"id": "5e6c23d2e2f92a90bd35bdbc937b2d7d95ee2d55", "title": "Fusion of Wavelet Coefficients from Visual and Thermal Face Images for Human Face Recognition - A Comparative Study", "addresses": [{"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5e6c/23d2e2f92a90bd35bdbc937b2d7d95ee2d55.pdf"}, {"id": "c03e01717b2d93f04cce9b5fd2dcfd1143bcc180", "title": "Locality-Constrained Active Appearance Model", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf"}, {"id": "4d15254f6f31356963cc70319ce416d28d8924a3", "title": "Quo vadis Face Recognition?", "addresses": [{"address": "Carnegie Mellon University", "lat": 
"37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}, {"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf"}, {"id": "1dad684de1ce4c013ba04eb4b1a70355b3786ecd", "title": "Computers Seeing People", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/933d/06908b782279b1127c9ba498d868b26ffe8e.pdf"}, {"id": "52909a123ba3b088a5a93d930dcd029ec2f1f24f", "title": "A Gabor-Block-Based Kernel Discriminative Common Vector Approach Using Cosine Kernels for Human Face Recognition", "addresses": [{"address": "Jadavpur University", "lat": "22.56115370", "lng": "88.41310194", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/5d05/a0deec42a061541bbd399bc9e40d4ad3374a.pdf"}, {"id": "b374391ab793a1bb2ecde4df51be9d97c2cbf79a", "title": "Improved PCA based Face Recognition using Feature based Classifier Ensemble", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/b374/391ab793a1bb2ecde4df51be9d97c2cbf79a.pdf"}, {"id": "d10cfcf206b0991e3bc20ac28df1f61c63516f30", "title": "Smile or smirk? Automatic detection of spontaneous asymmetric smiles to understand viewer experience", "addresses": [{"address": "Affectiva, Inc.", "lat": "42.35730460", "lng": "-71.05824150", "type": "company"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776"}, {"id": "bf0836e5c10add0b13005990ba019a9c4b744b06", "title": "An enhanced independent component-based human facial expression recognition from video", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1109/TCE.2009.5373791"}, {"id": "051f03bc25ec633592aa2ff5db1d416b705eac6c", "title": "Partial face recognition: An alignment free approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf"}, {"id": "aba31184783150c723805831cde0f22fe257b835", "title": "Contribution of Non-scrambled Chroma Information in Privacy-Protected Face Images to Privacy Leakage", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}, {"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/aba3/1184783150c723805831cde0f22fe257b835.pdf"}, {"id": "7ef41e2be5116912fe8a4906b4fb89ac9dcf819d", "title": "A hybrid face recognition method using Markov random fields", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334492"}, {"id": "757e4cb981e807d83539d9982ad325331cb59b16", "title": "Demographics versus Biometric Automatic Interoperability", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}, {"address": 
"Sapienza University of Rome", "lat": "41.90376260", "lng": "12.51443840", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf"}, {"id": "67c08e2b8b918a61dcbd0d4c63a74b89b833d259", "title": "Multi-class texture analysis in colorectal cancer histology", "addresses": [{"address": "University of Perugia", "lat": "49.26224210", "lng": "-123.24500520", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/67c0/8e2b8b918a61dcbd0d4c63a74b89b833d259.pdf"}, {"id": "ac942c4870e55fe1d9822d62edcdb685d41cd2bf", "title": "Pose Discriminiation and Eye Detection Using Support Vector Machines (SVM)", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}, {"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/ac94/2c4870e55fe1d9822d62edcdb685d41cd2bf.pdf"}, {"id": "a632ebe6f1e7d9b2b652b0186abef8db218037f3", "title": "Subliminally and Supraliminally Acquired Long-Term Memories Jointly Bias Delayed Decisions", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a632/ebe6f1e7d9b2b652b0186abef8db218037f3.pdf"}, {"id": "027f769aed0cfcb3169ef60f182ce1decc0e99eb", "title": "Local Directional Pattern (LDP) for face recognition", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}], "year": 2010, "pdf": "http://www.ijicic.org/10-12018-1.pdf"}, {"id": "edd6ed94207ab614c71ac0591d304a708d708e7b", "title": "Reconstructive discriminant analysis: A feature extraction method induced from linear regression classification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2012", "pdf": "http://doi.org/10.1016/j.neucom.2012.02.001"}, {"id": "5dbf772b98cb944befa9cf01ec5d15da713a338b", "title": "Face modeling for recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/9d82/44d5a32ecc314860c1d673d687df28f77d84.pdf"}, {"id": "8356b642e4e9bb39bd26ea6c4b9bad21bd9b1912", "title": "Seeing People in the Dark: Face Recognition in Infrared Images", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/8356/b642e4e9bb39bd26ea6c4b9bad21bd9b1912.pdf"}, {"id": "2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7", "title": "Feature-based face representations and image reconstruction from behavioral and neural data.", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2b73/e3d541b0208ae54b3920fef4bfd9fd0c84a7.pdf"}, {"id": "1e8d0998c69caf6e9495db1d6df562f8b9e90003", "title": "Solving the Small Sample Size Problem of LDA", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/1e8d/0998c69caf6e9495db1d6df562f8b9e90003.pdf"}, {"id": 
"99b93f67c3b2b0a474bf5670a7dd40a6a0e849ac", "title": "NIMBLER: A Model of Visual Attention and Object Recognition With a Biologically Plausible Retina", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/99b9/3f67c3b2b0a474bf5670a7dd40a6a0e849ac.pdf"}, {"id": "9729930ab0f9cbcd07f1105bc69c540330cda50a", "title": "Compressing Fisher Vector for Robust Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2749331"}, {"id": "459eb3cfd9b52a0d416571e4bc4e75f979f4b901", "title": "Vision development of humanoid head robot SHFR-III", "addresses": [{"address": "Shanghai University", "lat": "31.32235655", "lng": "121.38400941", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ROBIO.2015.7418998"}, {"id": "14b2dff604f148c4e5b54aa25fbecbf7f9071205", "title": "A new preselection method for face recognition in JPEG domain based on face segmentation", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2011, "pdf": "http://www.iranprc.org/pdf/paper/2011-06.pdf"}, {"id": "ff47698be7313005d0ea0fe0cc72c13f2f4b092a", "title": "Caring or daring? Exploring the impact of facial masculinity/femininity and gender category information on first impressions", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ff47/698be7313005d0ea0fe0cc72c13f2f4b092a.pdf"}, {"id": "0c6a18b0cee01038eb1f9373c369835b236373ae", "title": "Learning warps based similarity for pose-unconstrained face recognition", "addresses": [{"address": "Chonbuk National University", "lat": "35.84658875", "lng": "127.13501330", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-4359-9"}, {"id": "54e6343f4368d9e5468c3e83b6eeb3a58a3c7555", "title": "Reconstructing Perceived and Retrieved Faces from Activity Patterns in Lateral Parietal Cortex.", "addresses": [{"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/54e6/343f4368d9e5468c3e83b6eeb3a58a3c7555.pdf"}, {"id": "dc964b9c7242a985eb255b2410a9c45981c2f4d0", "title": "Feature Extraction by Using Dual-Generalized Discriminative Common Vectors", "addresses": [{"address": "Universitat Aut\u00f2noma de Barcelona", "lat": "41.50078110", "lng": "2.11143663", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s10851-018-0837-6"}, {"id": "8023864256a1a4a26e130a7165f3d70875c27467", "title": "LUT-Based Adaboost for Gender Classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/8023/864256a1a4a26e130a7165f3d70875c27467.pdf"}, {"id": "2a77e3221d0512aa5674cf6f9041c1ce81fc07f0", "title": "An Automatic Hybrid Segmentation Approach for Aligned Face Portrait Images", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65de/08bab21921fba39e97f0bc3585f62cb2bd5d.pdf"}, {"id": "aff92784567095ee526a705e21be4f42226bbaab", "title": "Face recognition in uncontrolled 
environments", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/aff9/2784567095ee526a705e21be4f42226bbaab.pdf"}, {"id": "a7d7fba176e442f60899c57b976ae6de6d013ceb", "title": "Gender differences in experiential and facial reactivity to approval and disapproval during emotional social interactions", "addresses": [{"address": "University of Salzburg", "lat": "47.79475945", "lng": "13.05417525", "type": "edu"}, {"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/a7d7/fba176e442f60899c57b976ae6de6d013ceb.pdf"}, {"id": "ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba", "title": "Face Recognition Using Histogram-based Features in Spatial and Frequency Domains", "addresses": [{"address": "Kogakuin University", "lat": "35.69027840", "lng": "139.69540096", "type": "edu"}, {"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ca50/b25eaad0c9146fc5a4a2cd4c472c77b970ba.pdf"}, {"id": "79dc9a1aa2ab7fa46e8024bd654a4a5776c1a6d6", "title": "Robust non-rigid 3D tracking for face recognition in real-world videos", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2011, "pdf": "http://mmlab.siat.ac.cn/sfchen-old/Publications/ICIA11-3Dtracking.pdf"}, {"id": "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "title": "AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces", "addresses": [{"address": "York University", "lat": "43.77439110", "lng": "-79.50481085", "type": "edu"}, {"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04277.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "f2ad9b43bac8c2bae9dea694f6a4e44c760e63da", "title": "A Study on Illumination Invariant Face Recognition Methods Based on Multiple Eigenspaces", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "North Dakota State University", "lat": "46.89715500", "lng": "-96.81827603", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf"}, {"id": "933d06908b782279b1127c9ba498d868b26ffe8e", "title": "Computers Seeing People", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "1999", "pdf": "https://pdfs.semanticscholar.org/933d/06908b782279b1127c9ba498d868b26ffe8e.pdf"}, {"id": "9e31e77f9543ab42474ba4e9330676e18c242e72", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": 
"https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "769d1a0aff0cf7842c7861d30ce654a029d6b467", "title": "Descriptor Learning Based on Fisher Separation Criterion for Texture Classification", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/769d/1a0aff0cf7842c7861d30ce654a029d6b467.pdf"}, {"id": "380862d22617064ffab1a3b42f0b11752d6bd785", "title": "Recognition from a Single Sample per Person with Multiple SOM Fusion", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/3808/62d22617064ffab1a3b42f0b11752d6bd785.pdf"}, {"id": "f2cc459ada3abd9d8aa82e92710676973aeff275", "title": "Object class recognition using range of multiple computer vision algorithms", "addresses": [{"address": "South East European University", "lat": "41.98676415", "lng": "20.96254516", "type": "edu"}], "year": 2011, "pdf": "http://ieeexplore.ieee.org/document/5967185/"}, {"id": "4bc55ffc2f53801267ca1767028515be6e84f551", "title": "The Decision to Engage Cognitive Control Is Driven by Expected Reward-Value: Neural and Behavioral Evidence", "addresses": [{"address": "University of British Columbia", "lat": "49.25839375", "lng": "-123.24658161", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4bc5/5ffc2f53801267ca1767028515be6e84f551.pdf"}, {"id": "0fae5d9d2764a8d6ea691b9835d497dd680bbccd", "title": "Face Recognition using Canonical Correlation Analysis", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf"}, {"id": "71ed20748c919cd261024b146992ced4c9c2157b", "title": "Learning Semantic Patterns with Discriminant Localized Binary Projections", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Microsoft Research Asia", "lat": "39.97721700", "lng": "116.33763200", "type": "company"}, {"address": "Beckman Institute", "lat": "40.11571585", "lng": "-88.22750772", "type": "edu"}], "year": 2006, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2006/01640756.pdf"}, {"id": "9103148dd87e6ff9fba28509f3b265e1873166c9", "title": "Face Analysis using 3D Morphable Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf"}, {"id": "80cef64706957c53a31b67045d208efe39205c9e", "title": "Deficits in other-race face recognition: no evidence for encoding-based effects.", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/80ce/f64706957c53a31b67045d208efe39205c9e.pdf"}, {"id": "eb6f15c59e6f2ffaa9a0a55d3f045c23a5a6d275", "title": "State-Trace Analysis of the Face Inversion Effect", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/eb6f/15c59e6f2ffaa9a0a55d3f045c23a5a6d275.pdf"}, {"id": "7589bded8fed54d6eb7800d24ace662b37ed0779", "title": "Face Recognition Algorithm Using Muti-direction Markov Stationary Features and Adjacent Pixel Intensity Difference 
Quantization Histogram", "addresses": [{"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/7589/bded8fed54d6eb7800d24ace662b37ed0779.pdf"}, {"id": "344a5802999dddd0a6d1c4d511910af2eb922231", "title": "DroneFace: An Open Dataset for Drone Research", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f0ba/552418698d1b881c6f9f02e2c84f969e66f3.pdf"}, {"id": "7c87f445a15597f603756587e0f9b8cf4d942ecc", "title": "Analysis of Sampling Techniques for Learning Binarized Statistical Image Features Using Fixations and Salience", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7c87/f445a15597f603756587e0f9b8cf4d942ecc.pdf"}, {"id": "04e06481e455c6eb838c22e8505dafc01b7d0cfa", "title": "L<inf>1</inf> regularized projection pursuit for additive model learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2008, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2008/L1.pdf"}, {"id": "841855205818d3a6d6f85ec17a22515f4f062882", "title": "Low Resolution Face Recognition in the Wild", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11529.pdf"}, {"id": "05bd6c2bc5dc6d65c48c6366788441bcfdd9db3a", "title": "Personalizing Smart Environments: Face Recognition for Human Interaction", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/05bd/6c2bc5dc6d65c48c6366788441bcfdd9db3a.pdf"}, {"id": "9902acd6ce7662c93ee2bd41c6c11a6b99ad8754", "title": "Robust Multimodal Biometric System using Markov Chain based Rank Level Fusion", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/9902/acd6ce7662c93ee2bd41c6c11a6b99ad8754.pdf"}, {"id": "6342a4c54835c1e14159495373ab18b4233d2d9b", "title": "Towards Pose-robust Face Recognition on Video", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf"}, {"id": "241416b1249d2b71b373f8dcf054110d579a2148", "title": "Biometric face recognition using multilinear projection and artificial intelligence", "addresses": [{"address": "Newcastle University", "lat": "54.98023235", "lng": "-1.61452627", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/2414/16b1249d2b71b373f8dcf054110d579a2148.pdf"}, {"id": "95d567081510e8e59834febc958668015c174602", "title": "Combining Gabor features: summing vs. 
voting in human face recognition", "addresses": [{"address": "Wayne State University", "lat": "42.35775700", "lng": "-83.06286711", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/95d5/67081510e8e59834febc958668015c174602.pdf"}, {"id": "e984017c5849ea78e3f50e374a5539770989536d", "title": "Bilinear Discriminant Analysis for Face Recognition", "addresses": [{"address": "\u00c9cole Centrale de Lyon", "lat": "45.78359660", "lng": "4.76789480", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/e984/017c5849ea78e3f50e374a5539770989536d.pdf"}, {"id": "b313751548018e4ecd5ae2ce6b3b94fbd9cae33e", "title": "Evaluation of Face Datasets as Tools for Assessing the\u00a0Performance of Face Recognition Methods", "addresses": [{"address": "National Institutes of Health", "lat": "39.00041165", "lng": "-77.10327775", "type": "edu"}], "year": "2008", "pdf": "http://doi.org/10.1007/s11263-008-0143-7"}, {"id": "ed9d11e995baeec17c5d2847ec1a8d5449254525", "title": "Efficient Gender Classification Using a Deep LDA-Pruned Net", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf"}, {"id": "385fa8768d174a9044bc723548a7f8810a62606c", "title": "Using an holistic method based on prior information to represent global and local variations on face images", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/385f/a8768d174a9044bc723548a7f8810a62606c.pdf"}, {"id": "826f1ac8ef16abd893062fdf5058a09881aed516", "title": "Identity-Preserving Face Recovery from Portraits", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.02279.pdf"}, {"id": "1d5219687b9e63767f19cd804147c256c5a5a3bc", "title": "Patch-based locality-enhanced collaborative representation for face recognition", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/1d52/19687b9e63767f19cd804147c256c5a5a3bc.pdf"}, {"id": "a1c1970f7c728cc96aea798d65d38df7c9ea61dc", "title": "Eye Location Using Genetic Algorithm", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 1999, "pdf": "http://pdfs.semanticscholar.org/a1c1/970f7c728cc96aea798d65d38df7c9ea61dc.pdf"}, {"id": "e121bf6f18e1cb114216a521df63c55030d10fbe", "title": "Robust Facial Component Detection for Face Alignment Applications", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/e121/bf6f18e1cb114216a521df63c55030d10fbe.pdf"}, {"id": "bc6011807fadc2d3e6bc97bb2c2ecee5ec1b64a8", "title": "Robust Face Recognition from a Single Training Image per Person with Kernel-Based SOM-Face", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/bc60/11807fadc2d3e6bc97bb2c2ecee5ec1b64a8.pdf"}, {"id": "ca2e14671f5043dab985dd18e10c5e3f51e2e8be", "title": "Face Recognition by Using 
Elongated Local Binary Patterns with Average Maximum Distance Gradient Magnitude", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/ca2e/14671f5043dab985dd18e10c5e3f51e2e8be.pdf"}, {"id": "edc6d96ae195897b33c07f5fa428149915b4cf6a", "title": "Face Pose Estimation System by Combining Hybrid Ica-svm Learning and 3d Modeling", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/edc6/d96ae195897b33c07f5fa428149915b4cf6a.pdf"}, {"id": "f65ff9d6d0025f198ac4f924d2f0df121bc51c67", "title": "Overlapping on Partitioned Facial Images", "addresses": [{"address": "Eastern Mediterranean University", "lat": "35.14479945", "lng": "33.90492318", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/f65f/f9d6d0025f198ac4f924d2f0df121bc51c67.pdf"}, {"id": "4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4", "title": "Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning", "addresses": [{"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2788002"}, {"id": "e26a7e343fe109e2b52d1eeea5b02dae836f3502", "title": "Facial Expression Recognition Utilizing Local Direction-Based Robust Features and Deep Belief Network", "addresses": [{"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "University of Oslo", "lat": "59.93891665", "lng": "10.72170765", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2676238"}, {"id": "4cb8a691a15e050756640c0a35880cdd418e2b87", "title": "Class-Based Matching of Object Parts", "addresses": [{"address": "Weizmann Institute of Science", "lat": "31.90784990", "lng": "34.81334092", "type": "edu"}], "year": 2004, "pdf": "http://www.vision.caltech.edu/~bart/Publications/2004/BartUllmanClassBasedMatching.pdf"}, {"id": "99b8a24aacaa53fa3f8a7e48734037c7b16f1c40", "title": "A Proposal to Improve the Authentication Process in m-Health Environments", "addresses": [{"address": "Universitat Polit\u00e8cnica de Val\u00e8ncia", "lat": "39.48083760", "lng": "-0.34095220", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2752176"}, {"id": "c2fa83e8a428c03c74148d91f60468089b80c328", "title": "Optimal Mean Robust Principal Component Analysis", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/c2fa/83e8a428c03c74148d91f60468089b80c328.pdf"}, {"id": "0fbe38527279f49561c0e1c6ff4e8f733fb79bbe", "title": "Integrating Utility into Face De-identification", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/7561/b691eb5e9913e4c3cb11caf2738d58b9c896.pdf"}, {"id": "43a03cbe8b704f31046a5aba05153eb3d6de4142", "title": "Towards Robust Face Recognition from Video", "addresses": [{"address": "Oak Ridge National Laboratory", "lat": "35.93006535", "lng": "-84.31240032", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/9594/3329cd6922a869dd6d58ef01e9492879034c.pdf"}, {"id": "cdd2ba6e6436cb5950692702053195a22789d129", 
"title": "Face-likeness and image variability drive responses in human face-selective ventral regions.", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/976c/3b5ad438fb0cf2fb157964e8e6f07a09ad9e.pdf"}, {"id": "b910590a0eb191d03e1aedb3d55c905129e92e6b", "title": "Robust gender classification on unconstrained face images", "addresses": [{"address": "Anhui University", "lat": "31.76909325", "lng": "117.17795091", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://doi.acm.org/10.1145/2808492.2808570"}, {"id": "dc4089294cb15e071893d24bdf2baa15de5dcb0b", "title": "Feature selection for subject identification in surveillance photos [face recognition applications]", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://www.comm.toronto.edu/~kostas/Publications2008/pub/proceed/105.pdf"}, {"id": "a80d057099a6ca872508f5d416a8cd67b788506a", "title": "A dissociation between similarity effects in episodic face recognition.", "addresses": [{"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/a80d/057099a6ca872508f5d416a8cd67b788506a.pdf"}, {"id": "56c273538a2dbb4cf43c39fa4725592e97ec1681", "title": "Eye Tracking to Enhance Facial Recognition Algorithms", "addresses": [{"address": "Clemson University", "lat": "34.66869155", "lng": "-82.83743476", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/56c2/73538a2dbb4cf43c39fa4725592e97ec1681.pdf"}, {"id": "c1f07ec629be1c6fe562af0e34b04c54e238dcd1", "title": "A Novel Facial Feature Localization Method Using Probabilistic-like Output", "addresses": [{"address": "University of Miami", "lat": "25.71733390", "lng": "-80.27866887", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf"}, {"id": "5173a20304ea7baa6bfe97944a5c7a69ea72530f", "title": "Best Basis Selection Method Using Learning Weights for Face Recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/5173/a20304ea7baa6bfe97944a5c7a69ea72530f.pdf"}, {"id": "83e893858d6a6b8abb07d89e9f821f90c2b074ea", "title": "Facial image retrieval based on demographic classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334677"}, {"id": "2d8a84a8e661ce3913cb6c05b18984b14ed11dac", "title": "P3: Toward Privacy-Preserving Photo Sharing", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6fd6/af3864fc5eb62e6328be79bf8174e939efcc.pdf"}, {"id": "643d11703569766bed0a994941ae5f7b3e101659", "title": "Unsupervised Training for 3D Morphable Model Regression", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06098.pdf"}, {"id": 
"fcfb48b19f37e531a56ae95186a214b05c0b94c7", "title": "FACE RECOGNITION WITH EIGENFACES \u2013 A DETAILED STUDY", "addresses": [{"address": "University of KwaZulu-Natal", "lat": "-29.86742190", "lng": "30.98072720", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/fcfb/48b19f37e531a56ae95186a214b05c0b94c7.pdf"}, {"id": "8c22dc1b494c4612c4ebc61b22a480666cd841d5", "title": "Towards Practical Facial Feature Detection", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b95b/9fcccb23be8948e96f0c110aaaedc0f7334a.pdf"}, {"id": "297c4503a18a959e3a06613d5e7e026ba351b9bf", "title": "Neurolaw: Differential brain activity for black and white faces predicts damage awards in hypothetical employment discrimination cases.", "addresses": [{"address": "Yale University", "lat": "41.25713055", "lng": "-72.98966960", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/297c/4503a18a959e3a06613d5e7e026ba351b9bf.pdf"}, {"id": "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "title": "Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/SSCI.2015.37"}, {"id": "90f0e0701b755bbce89cb0e4e3f0a070d49814a0", "title": "Beyond the retina: Evidence for a face inversion effect in the environmental frame of reference", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/90f0/e0701b755bbce89cb0e4e3f0a070d49814a0.pdf"}, {"id": "a6f93435e006328fd0a5dcb7639e771431cc2c37", "title": "Why Some Faces won't be Remembered: Brain Potentials Illuminate Successful Versus Unsuccessful Encoding for Same-Race and Other-Race Faces", "addresses": [{"address": "Northwestern University", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu"}, {"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/c161/7c3c90e4596867d94a00a3a2bb1d55c8843b.pdf"}, {"id": "e2aafdd2f508ee383a0227de9cee00246f251ebf", "title": "Face Matching Under Time Pressure and Task Demands", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/c6f0/53bc5dbdcd89cba842251feaa4bb8b91378b.pdf"}, {"id": "651ea8b030470ab4a70efced154e77028a102713", "title": "Increasing Face Recognition Rate", "addresses": [{"address": "University of KwaZulu-Natal", "lat": "-29.86742190", "lng": "30.98072720", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/651e/a8b030470ab4a70efced154e77028a102713.pdf"}, {"id": "62647a8f8a534db2ccfd0df7d513b4f084231d10", "title": "Weighted SOM-Face: Selecting Local Features for Recognition from Individual Face Image", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/6264/7a8f8a534db2ccfd0df7d513b4f084231d10.pdf"}, {"id": 
"d31bf8f6f9404a0ab2e601e723b9a07287d0693b", "title": "Feature Space Reduction for Face Recognition with Dual Linear Discriminant Analysis", "addresses": [{"address": "Warsaw University of Technology", "lat": "52.22165395", "lng": "21.00735776", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/d31b/f8f6f9404a0ab2e601e723b9a07287d0693b.pdf"}, {"id": "35cdd4df9f039f475247bf03fdcc605e40683dce", "title": "Eye Detection and Face Recognition Using Evolutionary Computation", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 1998, "pdf": "http://pdfs.semanticscholar.org/35cd/d4df9f039f475247bf03fdcc605e40683dce.pdf"}, {"id": "5c707dc74c3c39674f74dc22f6b6325af456811c", "title": "Restoring occluded regions using FW-PCA for face recognition", "addresses": [{"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": 2012, "pdf": "http://www.aoki.ecei.tohoku.ac.jp/~ito/W13_04.pdf"}, {"id": "a40476d94c5cf1f929ee9514d3761dca00dd774b", "title": "Watch List Face Surveillance Using Transductive Inference", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/a404/76d94c5cf1f929ee9514d3761dca00dd774b.pdf"}, {"id": "e4691de78d35ed7085311a466b8d02198bf714ac", "title": "The relation between race-related implicit associations and scalp-recorded neural activity evoked by faces from different races.", "addresses": [{"address": "Yale University", "lat": "41.25713055", "lng": "-72.98966960", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/e469/1de78d35ed7085311a466b8d02198bf714ac.pdf"}, {"id": "85639cefb8f8deab7017ce92717674d6178d43cc", "title": "Automatic Analysis of Spontaneous Facial Behavior: A Final Project Report", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/8563/9cefb8f8deab7017ce92717674d6178d43cc.pdf"}, {"id": "a967426ec9b761a989997d6a213d890fc34c5fe3", "title": "Relative ranking of facial attractiveness", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2013, "pdf": "http://vision.ucsd.edu/sites/default/files/043-wacv.pdf"}, {"id": "055530f7f771bb1d5f352e2758d1242408d34e4d", "title": "A Facial Expression Recognition System from Depth Video", "addresses": [{"address": "SungKyunKwan University", "lat": "37.30031270", "lng": "126.97212300", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0555/30f7f771bb1d5f352e2758d1242408d34e4d.pdf"}, {"id": "be84d76093a791bf78bed74ef1d7db54abeca878", "title": "Open World Face Recognition with Credibility and Confidence Measures", "addresses": [{"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/be84/d76093a791bf78bed74ef1d7db54abeca878.pdf"}, {"id": "ddb49e36570af09d96059b3b6f08f9124aafe24f", "title": "A Non-Iterative Approach to Reconstruct Face Templates from Match Scores", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}], "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.129"}, {"id": "98fcf33916a9bb4efdc652541573b2e7ef9e7d87", "title": "Trustworthy Tricksters: 
Violating a Negative Social Expectation Affects Source Memory and Person Perception When Fear of Exploitation Is High", "addresses": [{"address": "Georgia Southern University", "lat": "32.42143805", "lng": "-81.78450529", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/98fc/f33916a9bb4efdc652541573b2e7ef9e7d87.pdf"}, {"id": "11fa5abb5d5d09efbf9dacae6a6ceb9b2647f877", "title": "DCTNet: A simple learning-free approach for face recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2015, "pdf": "https://arxiv.org/pdf/1507.02049v3.pdf"}, {"id": "cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab", "title": "Quaero at TRECVID 2010: Semantic Indexing", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf"}, {"id": "65f6d0d91cdf1a77e3c5cb78c7d21f0f4f01f8b5", "title": "PhD Thesis Incremental, Robust, and Efficient Linear Discriminant Analysis Learning", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/65f6/d0d91cdf1a77e3c5cb78c7d21f0f4f01f8b5.pdf"}, {"id": "0e9ea74cf7106057efdb63f275ca6bb838168b0c", "title": "Progressive Principal Component Analysis", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/0e9e/a74cf7106057efdb63f275ca6bb838168b0c.pdf"}, {"id": "d4d2014f05e17869b72f180fd0065358c722ac65", "title": "UNIVERSITY OF CALGARY A MULTIMODAL BIOMETRIC SYSTEM BASED ON RANK LEVEL FUSION by MD. MARUF MONWAR A THESIS SUBMITTED TO THE FACULTY OF GRADUATE STUDIES IN PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/d4d2/014f05e17869b72f180fd0065358c722ac65.pdf"}, {"id": "fdd7c9f3838b8d868911afaafa08beffb79b5228", "title": "An efficient mechanism for compensating vague pattern identification in support of a multi-criteria recommendation system", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}, {"address": "National Cheng Kung University", "lat": "22.99919160", "lng": "120.21625134", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/fdd7/c9f3838b8d868911afaafa08beffb79b5228.pdf"}, {"id": "d3d5d86afec84c0713ec868cf5ed41661fc96edc", "title": "A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}, {"address": "Sabanci University", "lat": "40.89271590", "lng": "29.37863323", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1606.02894.pdf"}, {"id": "20100dbeb2dfebc7595d79755d737b21e75f39a6", "title": "Cluster Indicator Decomposition for Efficient Matrix Factorization", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/2010/0dbeb2dfebc7595d79755d737b21e75f39a6.pdf"}, {"id": "3ca9453d3c023bb81cce72ff2d633fc5075e1df6", "title": "Generic vs. 
Person Specific Active Appearance Models", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/e36f/5fab8758194fcad043e23288330657fe7742.pdf"}, {"id": "d082f35534932dfa1b034499fc603f299645862d", "title": "TAMING WILD FACES: WEB-SCALE, OPEN-UNIVERSE FACE IDENTIFICATION IN STILL AND VIDEO IMAGERY by ENRIQUE", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/d082/f35534932dfa1b034499fc603f299645862d.pdf"}, {"id": "649b47e02b82afeccc858f1f3dcec98379bfbbbd", "title": "Face Alignment Under Various Poses and Expressions", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/649b/47e02b82afeccc858f1f3dcec98379bfbbbd.pdf"}, {"id": "7264c2a8900c2ab41575578eb2d50557b2829f84", "title": "Silhouetted face profiles: a new methodology for face perception research.", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7264/c2a8900c2ab41575578eb2d50557b2829f84.pdf"}, {"id": "9887ab220254859ffc7354d5189083a87c9bca6e", "title": "Generic Image Classification Approaches Excel on Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf"}, {"id": "6fe83b5fdeeb6d92f24af3aed6a34c5bf9ce8845", "title": "Face Recognition Based on Local Directional Pattern Variance (LDPv)", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/6fe8/3b5fdeeb6d92f24af3aed6a34c5bf9ce8845.pdf"}, {"id": "6e177341d4412f9c9a639e33e6096344ef930202", "title": "A Gender Recognition System from Facial Image", "addresses": [{"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}, {"address": "University of Dhaka", "lat": "23.73169570", "lng": "90.39652750", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf"}, {"id": "327eab70296d39511d61e91c6839446d59f5e119", "title": "Roadmap for Reliable Ensemble Forecasting of the Sun-Earth System", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}, {"address": "University of Hawaii", "lat": "21.29827950", "lng": "-157.81869230", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}, {"address": "George Mason University", "lat": "38.83133325", "lng": "-77.30798839", "type": "edu"}, {"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}, {"address": "Utah State University", "lat": "41.74115040", "lng": "-111.81223090", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/327e/ab70296d39511d61e91c6839446d59f5e119.pdf"}, {"id": 
"b29f348e8675f75ff160ec65ebeeb3f3979b65d8", "title": "An objective and subjective evaluation of content-based privacy protection of face images in video surveillance systems using JPEG XR", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}, {"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/b29f/348e8675f75ff160ec65ebeeb3f3979b65d8.pdf"}, {"id": "1c2724243b27a18a2302f12dea79d9a1d4460e35", "title": "Fisher+Kernel criterion for discriminant analysis", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2005, "pdf": "http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf"}, {"id": "d16f37a15f6385a6a189b06833745da5d524f69b", "title": "Hebb repetition effects for non-verbal visual sequences: determinants of sequence acquisition.", "addresses": [{"address": "Bournemouth University", "lat": "50.74223495", "lng": "-1.89433739", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/d16f/37a15f6385a6a189b06833745da5d524f69b.pdf"}, {"id": "13791aa7c1047724c4046eee94e66a506b211eb9", "title": "Real-time Gender Classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/1379/1aa7c1047724c4046eee94e66a506b211eb9.pdf"}, {"id": "fa72e39971855dff6beb8174b5fa654e0ab7d324", "title": "A depth video-based facial expression recognition system using radon transform, generalized discriminant analysis, and hidden Markov model", "addresses": [{"address": "SungKyunKwan University", "lat": "37.30031270", "lng": "126.97212300", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1007/s11042-013-1793-1"}, {"id": "3843b8c4143e9f1e50c61eb462376e65861bbf24", "title": "Color Image Processing Using Reduced Biquaternions with Application to Face Recognition in a PCA Framework", "addresses": [{"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.359"}, {"id": "0cc3c62f762d64cffcab4ac7fea3896cb22a3df9", "title": "Preserving Privacy by De-identifying Facial Images", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/d30f/cc0e4c2c78cc5ff7bbd1227d3952d366a479.pdf"}, {"id": "2cae2ca6221fbfa9655e41ac52e54631ada7ad2c", "title": "Electoral College and Direct Popular Vote for Multi-Candidate Election", "addresses": [{"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/ffd6/14925a326efcb27ef52accd5638a912b4792.pdf"}, {"id": "18b4e9e51ee14c9d816358fbe1af29f0771b7916", "title": "Intelligent environments and active camera networks", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", 
"lng": "-117.23110049", "type": "edu"}], "year": 2000, "pdf": "http://pdfs.semanticscholar.org/18b4/e9e51ee14c9d816358fbe1af29f0771b7916.pdf"}, {"id": "0dde6981047067692793b71a2f7ad6a8708741d8", "title": "MODELING PHYSICAL PERSONALITIES FOR VIRTUAL AGENTS BY MODELING TRAIT IMPRESSIONS OF THE FACE: A NEURAL NETWORK ANALYSIS by SHERYL BRAHNAM", "addresses": [{"address": "City University of New York", "lat": "40.87228250", "lng": "-73.89489171", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/0dde/6981047067692793b71a2f7ad6a8708741d8.pdf"}, {"id": "20675281008211641d28ce0f2b6946537a8535c4", "title": "Multi-resolution Histograms of Local Variation Patterns (MHLVP) for Robust Face Recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2005, "pdf": "http://pdfs.semanticscholar.org/2067/5281008211641d28ce0f2b6946537a8535c4.pdf"}, {"id": "2e6e335e591da1e8899ff53f9a7ddb4c63520104", "title": "Is All Face Processing Holistic? The View from UCSD", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Iowa", "lat": "41.66590000", "lng": "-91.57310307", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/528a/6698911ff30aa648af4d0a5cf0dd9ee90b5c.pdf"}, {"id": "9c1b132243e0dcacde1717ce1cfe730a74bd8cbc", "title": "Hippocampus Is Place of Interaction between Unconscious and Conscious Memories", "addresses": [{"address": "University of Geneva", "lat": "42.57054745", "lng": "-88.55578627", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9c1b/132243e0dcacde1717ce1cfe730a74bd8cbc.pdf"}, {"id": "4fb9f05dc03eb4983d8f9a815745bb47970f1b93", "title": "On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f4ee/4f7ac7585f7ea0db3b27c5ad016dbfb0feac.pdf"}, {"id": "b9df25cc4be2f703b059da93823bad6e8e8c0659", "title": "Local Gabor Binary Pattern Whitened PCA: A Novel Approach for Face Recognition from Single Image Per Person", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b9df/25cc4be2f703b059da93823bad6e8e8c0659.pdf"}, {"id": "57ba4b6de23a6fc9d45ff052ed2563e5de00b968", "title": "An efficient deep neural networks training framework for robust face recognition", "addresses": [{"address": "Xiamen University", "lat": "24.43994190", "lng": "118.09301781", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296993"}, {"id": "90bd16caa44086db6f0e4bbc1dde7063cb71b7b8", "title": "Structured Doubly Stochastic Matrix for Graph Based Clustering: Structured Doubly Stochastic Matrix", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2016, "pdf": "http://www.kdd.org/kdd2016/papers/files/rfp1162-wangA.pdf"}, {"id": "15d1582c8b65dbab5ca027467718a2c286ddce7a", "title": "On robust face recognition via sparse coding: the good, the bad and the ugly", 
"addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/15d1/582c8b65dbab5ca027467718a2c286ddce7a.pdf"}, {"id": "e19a4dadf60848309c8fd7445d97918da654df76", "title": "JPEG Compressed Domain Face Recognition : Different Stages and Different Features", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/e19a/4dadf60848309c8fd7445d97918da654df76.pdf"}, {"id": "d1633dc3706580c8b9d98c4c0dfa9f9a29360ca3", "title": "Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1712.01619.pdf"}, {"id": "e104e213faa97d9a9c8b8e1f15b7431c601cb250", "title": "Modeling of facial aging and kinship: A survey", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.04636.pdf"}, {"id": "da6696345d0d4ff6328c1c5916b0ca870d4cc6cf", "title": "Robust Contrast-Invariant EigenDetection", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/da66/96345d0d4ff6328c1c5916b0ca870d4cc6cf.pdf"}, {"id": "3b3550680136aa2fe3bd57c9faa3bfa0dfb3e748", "title": "Forensic Face Recognition: a Survey", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/3b35/50680136aa2fe3bd57c9faa3bfa0dfb3e748.pdf"}, {"id": "4ba3f9792954ee3ba894e1e330cd77da4668fa22", "title": "Nearest Neighbor Discriminant Analysis", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/4ba3/f9792954ee3ba894e1e330cd77da4668fa22.pdf"}, {"id": "472ba8dd4ec72b34e85e733bccebb115811fd726", "title": "Cosine Similarity Metric Learning for Face Verification", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/final/ijb_c.csv b/site/datasets/final/ijb_c.csv new file mode 100644 index 00000000..15bfccab --- /dev/null +++ b/site/datasets/final/ijb_c.csv @@ -0,0 +1,141 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,IJB-A,ijb_c,0.0,0.0,,,140c95e53c619eac594d70f6369f518adfea12ef,main,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf,Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A,2015 +1,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,citation,https://arxiv.org/pdf/1703.09507.pdf,L2-constrained Softmax Loss for Discriminative Face Verification,2017 +2,IJB-A,ijb_c,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,146a7ecc7e34b85276dd0275c337eff6ba6ef8c0,citation,https://arxiv.org/pdf/1611.06158v1.pdf,AFFACT: Alignment-free facial attribute classification technique,2017 +3,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,313d5eba97fe064bdc1f00b7587a4b3543ef712a,citation,https://pdfs.semanticscholar.org/cb7f/93467b0ec1afd43d995e511f5d7bf052a5af.pdf,Compact Deep Aggregation for Set Retrieval,2018 +4,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,citation,https://doi.org/10.1109/ICPR.2016.7900278,Regularized metric adaptation for unconstrained face verification,2016 +5,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,48a9241edda07252c1aadca09875fabcfee32871,citation,https://arxiv.org/pdf/1611.08657v5.pdf,Convolutional Experts Constrained Local Model for Facial Landmark Detection,2017 +6,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,86204fc037936754813b91898377e8831396551a,citation,https://arxiv.org/pdf/1709.01442.pdf,Dense Face Alignment,2017 +7,IJB-A,ijb_c,22.57423855,88.4337303,"Institute of Engineering and Management, Kolkata, India",edu,b2cb335ded99b10f37002d09753bd5a6ea522ef1,citation,https://doi.org/10.1109/ISBA.2017.7947679,Analysis of adaptability of deep features for verifying blurred and cross-resolution images,2017 +8,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,b2cb335ded99b10f37002d09753bd5a6ea522ef1,citation,https://doi.org/10.1109/ISBA.2017.7947679,Analysis of adaptability of deep features for verifying blurred and cross-resolution images,2017 +9,IJB-A,ijb_c,45.7835966,4.7678948,École Centrale de Lyon,edu,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +10,IJB-A,ijb_c,48.832493,2.267474,Safran Identity and Security,company,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +11,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,2d748f8ee023a5b1fbd50294d176981ded4ad4ee,citation,http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf,Triplet Similarity Embedding for Face Verification,2016 +12,IJB-A,ijb_c,38.99203005,-76.9461029,University of Maryland College Park,edu,f7824758800a7b1a386db5bd35f84c81454d017a,citation,https://arxiv.org/pdf/1702.05085.pdf,KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors,2017 +13,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State 
University,edu,02467703b6e087799e04e321bea3a4c354c5487d,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27,Grouper: Optimizing Crowdsourced Face Annotations,2016 +14,IJB-A,ijb_c,39.329053,-76.619425,Johns Hopkins University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +15,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +16,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +17,IJB-A,ijb_c,38.95187,-77.363259,"Noblis, Falls Church, VA, U.S.A.",company,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +18,IJB-A,ijb_c,46.0501558,14.46907327,University of Ljubljana,edu,5226296884b3e151ce317a37f94827dbda0b9d16,citation,https://doi.org/10.1109/IWBF.2016.7449690,Deep pair-wise similarity learning for face recognition,2016 +19,IJB-A,ijb_c,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,80be8624771104ff4838dcba9629bacfe6b3ea09,citation,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,2014 +20,IJB-A,ijb_c,1.3484104,103.68297965,Nanyang Technological University,edu,80be8624771104ff4838dcba9629bacfe6b3ea09,citation,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,2014 +21,IJB-A,ijb_c,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,80be8624771104ff4838dcba9629bacfe6b3ea09,citation,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,2014 +22,IJB-A,ijb_c,22.304572,114.17976285,Hong Kong Polytechnic University,edu,50b58becaf67e92a6d9633e0eea7d352157377c3,citation,https://pdfs.semanticscholar.org/50b5/8becaf67e92a6d9633e0eea7d352157377c3.pdf,Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets,2018 +23,IJB-A,ijb_c,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +24,IJB-A,ijb_c,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,ac2881bdf7b57dc1672a17b221d68a438d79fce8,citation,https://arxiv.org/pdf/1806.08472.pdf,Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization,2018 +25,IJB-A,ijb_c,40.0044795,116.370238,Chinese Academy of Sciences,edu,72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,citation,https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf,Face Recognition with Contrastive Convolution,2018 +26,IJB-A,ijb_c,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,citation,https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf,Face Recognition with Contrastive Convolution,2018 
+27,IJB-A,ijb_c,42.3889785,-72.5286987,University of Massachusetts,edu,368e99f669ea5fd395b3193cd75b301a76150f9d,citation,https://arxiv.org/pdf/1506.01342.pdf,One-to-many face recognition with bilinear CNNs,2016 +28,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,citation,http://arxiv.org/pdf/1607.01450v1.pdf,Pooling Faces: Template Based Face Recognition with Pooled Face Images,2016 +29,IJB-A,ijb_c,38.88140235,121.52281098,Dalian University of Technology,edu,052f994898c79529955917f3dfc5181586282cf8,citation,https://arxiv.org/pdf/1708.02191.pdf,Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos,2017 +30,IJB-A,ijb_c,32.9820799,-96.7566278,University of Texas at Dallas,edu,4e8168fbaa615009d1618a9d6552bfad809309e9,citation,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf,Deep Convolutional Neural Network Features and the Original Image,2016 +31,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,4e8168fbaa615009d1618a9d6552bfad809309e9,citation,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf,Deep Convolutional Neural Network Features and the Original Image,2016 +32,IJB-A,ijb_c,29.7207902,-95.34406271,University of Houston,edu,3cb2841302af1fb9656f144abc79d4f3d0b27380,citation,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf,When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition,2017 +33,IJB-A,ijb_c,24.4469025,54.3942563,Khalifa University,edu,0c1d85a197a1f5b7376652a485523e616a406273,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169,Joint Registration and Representation Learning for Unconstrained Face Identification,2017 +34,IJB-A,ijb_c,-35.23656905,149.08446994,University of Canberra,edu,0c1d85a197a1f5b7376652a485523e616a406273,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169,Joint Registration and Representation Learning for Unconstrained Face Identification,2017 +35,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,c75e6ce54caf17b2780b4b53f8d29086b391e839,citation,https://arxiv.org/pdf/1802.00542.pdf,"ExpNet: Landmark-Free, Deep, 3D Facial Expressions",2018 +36,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,450c6a57f19f5aa45626bb08d7d5d6acdb863b4b,citation,https://arxiv.org/pdf/1805.00611.pdf,Towards Interpretable Face Recognition,2018 +37,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,citation,http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf,Template Adaptation for Face Verification and Identification,2017 +38,IJB-A,ijb_c,29.7207902,-95.34406271,University of Houston,edu,8334da483f1986aea87b62028672836cb3dc6205,citation,https://arxiv.org/pdf/1805.06306.pdf,Fully Associative Patch-Based 1-to-N Matcher for Face Recognition,2018 +39,IJB-A,ijb_c,-33.8809651,151.20107299,University of Technology Sydney,edu,3b64efa817fd609d525c7244a0e00f98feacc8b4,citation,http://doi.acm.org/10.1145/2845089,A Comprehensive Survey on Pose-Invariant Face Recognition,2016 +40,IJB-A,ijb_c,40.9153196,-73.1270626,Stony Brook University,edu,6fbb179a4ad39790f4558dd32316b9f2818cd106,citation,http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf,Input Aggregated Network for Face Video Representation,2016 +41,IJB-A,ijb_c,38.8920756,-104.79716389,"University of Colorado, Colorado 
Springs",edu,d4f1eb008eb80595bcfdac368e23ae9754e1e745,citation,https://arxiv.org/pdf/1708.02337.pdf,Unconstrained Face Detection and Open-Set Face Recognition Challenge,2017 +42,IJB-A,ijb_c,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +43,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +44,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,d28d32af7ef9889ef9cb877345a90ea85e70f7f1,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84,Local-Global Landmark Confidences for Face Recognition,2017 +45,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,d28d32af7ef9889ef9cb877345a90ea85e70f7f1,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84,Local-Global Landmark Confidences for Face Recognition,2017 +46,IJB-A,ijb_c,51.5247272,-0.03931035,Queen Mary University of London,edu,a29566375836f37173ccaffa47dea25eb1240187,citation,https://arxiv.org/pdf/1809.09409.pdf,Vehicle Re-Identification in Context,2018 +47,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,29f298dd5f806c99951cb434834bc8dcc765df18,citation,https://doi.org/10.1109/ICPR.2016.7899837,Computationally efficient template-based face recognition,2016 +48,IJB-A,ijb_c,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +49,IJB-A,ijb_c,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +50,IJB-A,ijb_c,50.8142701,8.771435,Philipps-Universität Marburg,edu,5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,citation,https://doi.org/10.1109/ICT.2017.7998256,SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks,2017 +51,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,a2b4a6c6b32900a066d0257ae6d4526db872afe2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466,Learning Face Image Quality From Human Assessments,2018 +52,IJB-A,ijb_c,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,3dfb822e16328e0f98a47209d7ecd242e4211f82,citation,https://arxiv.org/pdf/1708.08197.pdf,Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments,2017 +53,IJB-A,ijb_c,47.6423318,-122.1369302,Microsoft,company,291265db88023e92bb8c8e6390438e5da148e8f5,citation,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,2016 +54,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,d29eec5e047560627c16803029d2eb8a4e61da75,citation,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf,Feature Transfer Learning for Deep Face Recognition with Long-Tail Data,2018 
+55,IJB-A,ijb_c,36.20304395,117.05842113,Tianjin University,edu,5180df9d5eb26283fb737f491623395304d57497,citation,https://arxiv.org/pdf/1804.10899.pdf,Scalable Angular Discriminative Deep Metric Learning for Face Recognition,2018 +56,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong Kong,edu,abdd17e411a7bfe043f280abd4e560a04ab6e992,citation,https://arxiv.org/pdf/1803.00839.pdf,Pose-Robust Face Recognition via Deep Residual Equivariant Mapping,2018 +57,IJB-A,ijb_c,28.5456282,77.2731505,"IIIT Delhi, India",edu,3cf1f89d73ca4b25399c237ed3e664a55cd273a2,citation,https://arxiv.org/pdf/1710.02914.pdf,Face Sketch Matching via Coupled Deep Transform Learning,2017 +58,IJB-A,ijb_c,-27.49741805,153.01316956,University of Queensland,edu,f27fd2a1bc229c773238f1912db94991b8bf389a,citation,https://doi.org/10.1109/IVCNZ.2016.7804414,How do you develop a face detector for the unconstrained environment?,2016 +59,IJB-A,ijb_c,39.86742125,32.73519072,Hacettepe University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +60,IJB-A,ijb_c,39.87549675,32.78553506,Middle East Technical University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +61,IJB-A,ijb_c,28.2290209,112.99483204,"National University of Defense Technology, China",edu,c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,citation,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf,Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis,2017 +62,IJB-A,ijb_c,1.2962018,103.77689944,National University of Singapore,edu,c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,citation,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf,Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis,2017 +63,IJB-A,ijb_c,17.4454957,78.34854698,International Institute of Information Technology,edu,f5eb411217f729ad7ae84bfd4aeb3dedb850206a,citation,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf,Tackling Low Resolution for Better Scene Understanding,2018 +64,IJB-A,ijb_c,40.51865195,-74.44099801,State University of New Jersey,edu,96e731e82b817c95d4ce48b9e6b08d2394937cf8,citation,http://arxiv.org/pdf/1508.01722v2.pdf,Unconstrained face verification using deep CNN features,2016 +65,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,96e731e82b817c95d4ce48b9e6b08d2394937cf8,citation,http://arxiv.org/pdf/1508.01722v2.pdf,Unconstrained face verification using deep CNN features,2016 +66,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,870433ba89d8cab1656e57ac78f1c26f4998edfb,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163,Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network,2017 +67,IJB-A,ijb_c,55.6801502,12.572327,University of Copenhagen,edu,3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,citation,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf,Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions,2018 +68,IJB-A,ijb_c,35.9023226,14.4834189,University of Malta,edu,3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,citation,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf,Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich 
Face Descriptions,2018 +69,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,6341274aca0c2977c3e1575378f4f2126aa9b050,citation,http://arxiv.org/pdf/1609.03536v1.pdf,A multi-scale cascade fully convolutional network face detector,2016 +70,IJB-A,ijb_c,41.70456775,-86.23822026,University of Notre Dame,edu,17479e015a2dcf15d40190e06419a135b66da4e0,citation,https://arxiv.org/pdf/1610.08119.pdf,Predicting First Impressions With Deep Learning,2017 +71,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,a0b1990dd2b4cd87e4fd60912cc1552c34792770,citation,https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf,Deep Constrained Local Models for Facial Landmark Detection,2016 +72,IJB-A,ijb_c,30.642769,104.06751175,"Sichuan University, Chengdu",edu,772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,citation,https://doi.org/10.1109/BTAS.2017.8272716,Multi-dim: A multi-dimensional face database towards the application of 3D technology in real-world scenarios,2017 +73,IJB-A,ijb_c,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,4b3f425274b0c2297d136f8833a31866db2f2aec,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.85,Toward Open-Set Face Recognition,2017 +74,IJB-A,ijb_c,56.46255985,84.95565495,Tomsk Polytechnic University,edu,17ded725602b4329b1c494bfa41527482bf83a6f,citation,http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf,Compact Convolutional Neural Network Cascade for Face Detection,2015 +75,IJB-A,ijb_c,37.3351908,-121.88126008,San Jose State University,edu,14b016c7a87d142f4b9a0e6dc470dcfc073af517,citation,http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912,Modest proposals for improving biometric recognition papers,2015 +76,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +77,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,def2983576001bac7d6461d78451159800938112,citation,https://arxiv.org/pdf/1705.07426.pdf,The Do’s and Don’ts for CNN-Based Face Verification,2017 +78,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,4b605e6a9362485bfe69950432fa1f896e7d19bf,citation,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf,A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets,2016 +79,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,8d3e95c31c93548b8c71dbeee2e9f7180067a888,citation,https://doi.org/10.1109/ICPR.2016.7899841,Template regularized sparse coding for face verification,2016 +80,IJB-A,ijb_c,42.8271556,-73.8780481,GE Global Research,company,8d3e95c31c93548b8c71dbeee2e9f7180067a888,citation,https://doi.org/10.1109/ICPR.2016.7899841,Template regularized sparse coding for face verification,2016 +81,IJB-A,ijb_c,25.0410728,121.6147562,Institute of Information Science,edu,337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958,citation,https://arxiv.org/pdf/1810.11160.pdf,Data-specific Adaptive Threshold for Face Recognition and Authentication,2018 +82,IJB-A,ijb_c,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,0aeb5020003e0c89219031b51bd30ff1bceea363,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525,Sparsifying Neural Network Connections for Face Recognition,2016 +83,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong 
Kong,edu,0aeb5020003e0c89219031b51bd30ff1bceea363,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525,Sparsifying Neural Network Connections for Face Recognition,2016 +84,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,99daa2839213f904e279aec7cef26c1dfb768c43,citation,https://arxiv.org/pdf/1805.02283.pdf,DocFace: Matching ID Document Photos to Selfies,2018 +85,IJB-A,ijb_c,43.7776426,11.259765,University of Florence,edu,71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774,Effective 3D based frontalization for unconstrained face recognition,2016 +86,IJB-A,ijb_c,51.49887085,-0.17560797,Imperial College London,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +87,IJB-A,ijb_c,51.24303255,-0.59001382,University of Surrey,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +88,IJB-A,ijb_c,37.3936717,-122.0807262,Facebook,company,628a3f027b7646f398c68a680add48c7969ab1d9,citation,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition,2017 +89,IJB-A,ijb_c,40.2773077,-7.5095801,University of Beira Interior,edu,61262450d4d814865a4f9a84299c24daa493f66e,citation,http://doi.org/10.1007/s10462-016-9474-x,Biometric recognition in surveillance scenarios: a survey,2016 +90,IJB-A,ijb_c,-31.95040445,115.79790037,University of Western Australia,edu,626913b8fcbbaee8932997d6c4a78fe1ce646127,citation,https://arxiv.org/pdf/1711.05942.pdf,Learning from Millions of 3D Scans for Large-scale 3D Face Recognition,2017 +91,IJB-A,ijb_c,35.9023226,14.4834189,University of Malta,edu,4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,citation,https://doi.org/10.1109/TIFS.2017.2788002,"Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning",2018 +92,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,b6f758be954d34817d4ebaa22b30c63a4b8ddb35,citation,http://arxiv.org/abs/1703.04835,A Proximity-Aware Hierarchical Clustering of Faces,2017 +93,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,citation,https://arxiv.org/pdf/1708.07517.pdf,FacePoseNet: Making a Case for Landmark-Free Face Alignment,2017 +94,IJB-A,ijb_c,40.2773077,-7.5095801,University of Beira Interior,edu,84ae55603bffda40c225fe93029d39f04793e01f,citation,https://doi.org/10.1109/ICB.2016.7550066,ICB-RW 2016: International challenge on biometric recognition in the wild,2016 +95,IJB-A,ijb_c,41.70456775,-86.23822026,University of Notre Dame,edu,73ea06787925157df519a15ee01cc3dc1982a7e0,citation,https://arxiv.org/pdf/1811.01474.pdf,Fast Face Image Synthesis with Minimal Training,2018 +96,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,c6382de52636705be5898017f2f8ed7c70d7ae96,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,Unconstrained face detection: State of the art baseline and challenges,2015 +97,IJB-A,ijb_c,38.95187,-77.363259,"Noblis, Falls Church, VA, 
U.S.A.",company,c6382de52636705be5898017f2f8ed7c70d7ae96,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,Unconstrained face detection: State of the art baseline and challenges,2015 +98,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,eee06d68497be8bf3a8aba4fde42a13aa090b301,citation,https://arxiv.org/pdf/1806.11191.pdf,CR-GAN: Learning Complete Representations for Multi-view Generation,2018 +99,IJB-A,ijb_c,35.3103441,-80.73261617,University of North Carolina at Charlotte,edu,eee06d68497be8bf3a8aba4fde42a13aa090b301,citation,https://arxiv.org/pdf/1806.11191.pdf,CR-GAN: Learning Complete Representations for Multi-view Generation,2018 +100,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,a3201e955d6607d383332f3a12a7befa08c5a18c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,VLAD encoded Deep Convolutional features for unconstrained face verification,2016 +101,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,a3201e955d6607d383332f3a12a7befa08c5a18c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,VLAD encoded Deep Convolutional features for unconstrained face verification,2016 +102,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong Kong,edu,52d7eb0fbc3522434c13cc247549f74bb9609c5d,citation,https://arxiv.org/pdf/1511.06523.pdf,WIDER FACE: A Face Detection Benchmark,2016 +103,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,19458454308a9f56b7de76bf7d8ff8eaa52b0173,citation,https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf,Deep Features for Recognizing Disguised Faces in the Wild,0 +104,IJB-A,ijb_c,43.7776426,11.259765,University of Florence,edu,746c0205fdf191a737df7af000eaec9409ede73f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119,Investigating Nuisances in DCNN-Based Face Recognition,2018 +105,IJB-A,ijb_c,47.5612651,7.5752961,University of Basel,edu,0081e2188c8f34fcea3e23c49fb3e17883b33551,citation,http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf,Training Deep Face Recognition Systems with Synthetic Data,2018 +106,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4,citation,https://arxiv.org/pdf/1803.00130.pdf,Ring loss: Convex Feature Normalization for Face Recognition,2018 +107,IJB-A,ijb_c,28.2290209,112.99483204,"National University of Defense Technology, China",edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018 +108,IJB-A,ijb_c,1.2962018,103.77689944,National University of Singapore,edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018 +109,IJB-A,ijb_c,42.3889785,-72.5286987,University of Massachusetts,edu,2241eda10b76efd84f3c05bdd836619b4a3df97e,citation,http://arxiv.org/pdf/1506.01342v5.pdf,One-to-many face recognition with bilinear CNNs,2016 +110,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong Kong,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +111,IJB-A,ijb_c,22.59805605,113.98533784,Shenzhen Institutes 
of Advanced Technology,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +112,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,7fb5006b6522436ece5bedf509e79bdb7b79c9a7,citation,https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf,Multi-Task Convolutional Neural Network for Face Recognition,2017 +113,IJB-A,ijb_c,-27.49741805,153.01316956,University of Queensland,edu,28646c6220848db46c6944967298d89a6559c700,citation,https://pdfs.semanticscholar.org/2864/6c6220848db46c6944967298d89a6559c700.pdf,It takes two to tango : Cascading off-the-shelf face detectors,2018 +114,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,5812d8239d691e99d4108396f8c26ec0619767a6,citation,https://arxiv.org/pdf/1810.09951.pdf,GhostVLAD for set-based face recognition,2018 +115,IJB-A,ijb_c,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,e4c3587392d477b7594086c6f28a00a826abf004,citation,https://doi.org/10.1109/ICIP.2017.8296998,Face recognition by facial attribute assisted network,2017 +116,IJB-A,ijb_c,1.3484104,103.68297965,Nanyang Technological University,edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +117,IJB-A,ijb_c,32.87935255,-117.23110049,"University of California, San Diego",edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +118,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,ce6d60b69eb95477596535227958109e07c61e1e,citation,http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf,Unconstrained face verification using fisher vectors computed from frontalized faces,2015 +119,IJB-A,ijb_c,29.7207902,-95.34406271,University of Houston,edu,38d8ff137ff753f04689e6b76119a44588e143f3,citation,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf,When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition,2017 +120,IJB-A,ijb_c,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,9627f28ea5f4c389350572b15968386d7ce3fe49,citation,https://arxiv.org/pdf/1802.07447.pdf,Load Balanced GANs for Multi-view Face Image Synthesis,2018 +121,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,4e7ed13e541b8ed868480375785005d33530e06d,citation,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477555,Face recognition using deep multi-pose representations,2016 +122,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,582edc19f2b1ab2ac6883426f147196c8306685a,citation,http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf,Do We Really Need to Collect Millions of Faces for Effective Face Recognition?,2016 +123,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,citation,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,2016 +124,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern 
California,edu,87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,citation,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,2016 +125,IJB-A,ijb_c,39.65404635,-79.96475355,West Virginia University,edu,3b9b200e76a35178da940279d566bbb7dfebb787,citation,http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf,Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition,2017 +126,IJB-A,ijb_c,-27.49741805,153.01316956,University of Queensland,edu,de79437f74e8e3b266afc664decf4e6e4bdf34d7,citation,https://doi.org/10.1109/IVCNZ.2016.7804415,To face or not to face: Towards reducing false positive of face detection,2016 +127,IJB-A,ijb_c,46.0501558,14.46907327,University of Ljubljana,edu,368d59cf1733af511ed8abbcbeb4fb47afd4da1c,citation,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,2016 +128,IJB-A,ijb_c,41.70456775,-86.23822026,University of Notre Dame,edu,368d59cf1733af511ed8abbcbeb4fb47afd4da1c,citation,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,2016 +129,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,62e913431bcef5983955e9ca160b91bb19d9de42,citation,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf,Facial Landmark Detection with Tweaked Convolutional Neural Networks,2015 +130,IJB-A,ijb_c,29.5084174,106.57858552,Chongqing University,edu,acd4280453b995cb071c33f7c9db5760432f4279,citation,https://doi.org/10.1007/s00138-018-0907-1,Deep transformation learning for face recognition in the unconstrained scene,2018 +131,IJB-A,ijb_c,38.99203005,-76.9461029,University of Maryland College Park,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +132,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +133,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +134,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,37619564574856c6184005830deda4310d3ca580,citation,https://doi.org/10.1109/BTAS.2015.7358755,A deep pyramid Deformable Part Model for face detection,2015 +135,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,eb027969f9310e0ae941e2adee2d42cdf07d938c,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +136,IJB-A,ijb_c,42.3889785,-72.5286987,University of Massachusetts,edu,3c97c32ff575989ef2869f86d89c63005fc11ba9,citation,http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf,Face Detection with the Faster R-CNN,2017 +137,IJB-A,ijb_c,39.2899685,-76.62196103,University of 
Maryland,edu,4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,citation,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf,Deep Density Clustering of Unconstrained Faces,0 +138,IJB-A,ijb_c,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +139,IJB-A,ijb_c,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 diff --git a/site/datasets/final/ijb_c.json b/site/datasets/final/ijb_c.json index 9899952e..8b65077f 100644 --- a/site/datasets/final/ijb_c.json +++ b/site/datasets/final/ijb_c.json @@ -1 +1 @@ -{"id": "140c95e53c619eac594d70f6369f518adfea12ef", "paper": {"paper_id": "140c95e53c619eac594d70f6369f518adfea12ef", "key": "ijb_c", "title": "Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf", "address": "", "name": "IJB-A"}, "address": null, "additional_papers": [{"paper_id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "key": "ijb_c", "title": "IARPA Janus Benchmark-B Face Dataset", "year": 2017, "pdf": "http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf", "address": "", "name": "IJB-A"}, {"paper_id": "57178b36c21fd7f4529ac6748614bb3374714e91", "key": "ijb_c", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217", "address": "", "name": "IJB-A"}], "citations": [{"id": "872dfdeccf99bbbed7c8f1ea08afb2d713ebe085", "title": "L2-constrained Softmax Loss for Discriminative Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.09507.pdf"}, {"id": "146a7ecc7e34b85276dd0275c337eff6ba6ef8c0", "title": "AFFACT: Alignment-free facial attribute classification technique", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1611.06158v1.pdf"}, {"id": "313d5eba97fe064bdc1f00b7587a4b3543ef712a", "title": "Compact Deep Aggregation for Set Retrieval", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cb7f/93467b0ec1afd43d995e511f5d7bf052a5af.pdf"}, {"id": "5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b", "title": "Regularized metric adaptation for unconstrained face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900278"}, {"id": "48a9241edda07252c1aadca09875fabcfee32871", "title": "Convolutional Experts Constrained Local Model for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1611.08657v5.pdf"}, {"id": 
"86204fc037936754813b91898377e8831396551a", "title": "Dense Face Alignment", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01442.pdf"}, {"id": "b2cb335ded99b10f37002d09753bd5a6ea522ef1", "title": "Analysis of adaptability of deep features for verifying blurred and cross-resolution images", "addresses": [{"address": "Institute of Engineering and Management, Kolkata, India", "lat": "22.57423855", "lng": "88.43373030", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ISBA.2017.7947679"}, {"id": "486840f4f524e97f692a7f6b42cd19019ee71533", "title": "DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills", "addresses": [{"address": "\u00c9cole Centrale de Lyon", "lat": "45.78359660", "lng": "4.76789480", "type": "edu"}, {"address": "Safran Identity and Security", "lat": "48.83249300", "lng": "2.26747400", "type": "company"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1703.08388v2.pdf"}, {"id": "2d748f8ee023a5b1fbd50294d176981ded4ad4ee", "title": "Triplet Similarity Embedding for Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf"}, {"id": "f7824758800a7b1a386db5bd35f84c81454d017a", "title": "KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1702.05085.pdf"}, {"id": "02467703b6e087799e04e321bea3a4c354c5487d", "title": "Grouper: Optimizing Crowdsourced Face Annotations", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27"}, {"id": "377f2b65e6a9300448bdccf678cde59449ecd337", "title": "Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10275.pdf"}, {"id": "cd55fb30737625e86454a2861302b96833ed549d", "title": "Annotating Unconstrained Face Imagery: A scalable approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "Noblis, Falls Church, VA, U.S.A.", "lat": "38.95187000", "lng": "-77.36325900", "type": "company"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094"}, {"id": "5226296884b3e151ce317a37f94827dbda0b9d16", "title": "Deep pair-wise similarity learning for face recognition", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IWBF.2016.7449690"}, {"id": "80be8624771104ff4838dcba9629bacfe6b3ea09", "title": "Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", 
"lat": "39.96014880", "lng": "116.35193921", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2014, "pdf": "http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf"}, {"id": "50b58becaf67e92a6d9633e0eea7d352157377c3", "title": "Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/50b5/8becaf67e92a6d9633e0eea7d352157377c3.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "ac2881bdf7b57dc1672a17b221d68a438d79fce8", "title": "Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08472.pdf"}, {"id": "72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e", "title": "Face Recognition with Contrastive Convolution", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf"}, {"id": "368e99f669ea5fd395b3193cd75b301a76150f9d", "title": "One-to-many face recognition with bilinear CNNs", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1506.01342.pdf"}, {"id": "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "title": "Pooling Faces: Template Based Face Recognition with Pooled Face Images", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1607.01450v1.pdf"}, {"id": "052f994898c79529955917f3dfc5181586282cf8", "title": "Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02191.pdf"}, {"id": "4e8168fbaa615009d1618a9d6552bfad809309e9", "title": "Deep Convolutional Neural Network Features and the Original Image", "addresses": [{"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf"}, {"id": "3cb2841302af1fb9656f144abc79d4f3d0b27380", "title": "When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2017", "pdf": 
"https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf"}, {"id": "0c1d85a197a1f5b7376652a485523e616a406273", "title": "Joint Registration and Representation Learning for Unconstrained Face Identification", "addresses": [{"address": "Khalifa University", "lat": "24.44690250", "lng": "54.39425630", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169"}, {"id": "c75e6ce54caf17b2780b4b53f8d29086b391e839", "title": "ExpNet: Landmark-Free, Deep, 3D Facial Expressions", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.00542.pdf"}, {"id": "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "title": "Towards Interpretable Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00611.pdf"}, {"id": "30180f66d5b4b7c0367e4b43e2b55367b72d6d2a", "title": "Template Adaptation for Face Verification and Identification", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2017, "pdf": "http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf"}, {"id": "8334da483f1986aea87b62028672836cb3dc6205", "title": "Fully Associative Patch-Based 1-to-N Matcher for Face Recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06306.pdf"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "6fbb179a4ad39790f4558dd32316b9f2818cd106", "title": "Input Aggregated Network for Face Video Representation", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf"}, {"id": "d4f1eb008eb80595bcfdac368e23ae9754e1e745", "title": "Unconstrained Face Detection and Open-Set Face Recognition Challenge", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02337.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "d28d32af7ef9889ef9cb877345a90ea85e70f7f1", "title": "Local-Global Landmark Confidences for Face Recognition", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": 
"http://doi.ieeecomputersociety.org/10.1109/FG.2017.84"}, {"id": "a29566375836f37173ccaffa47dea25eb1240187", "title": "Vehicle Re-Identification in Context", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.09409.pdf"}, {"id": "29f298dd5f806c99951cb434834bc8dcc765df18", "title": "Computationally efficient template-based face recognition", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899837"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c", "title": "SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks", "addresses": [{"address": "Philipps-Universit\u00e4t Marburg", "lat": "50.81427010", "lng": "8.77143500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICT.2017.7998256"}, {"id": "a2b4a6c6b32900a066d0257ae6d4526db872afe2", "title": "Learning Face Image Quality From Human Assessments", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "d29eec5e047560627c16803029d2eb8a4e61da75", "title": "Feature Transfer Learning for Deep Face Recognition with Long-Tail Data", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf"}, {"id": "5180df9d5eb26283fb737f491623395304d57497", "title": "Scalable Angular Discriminative Deep Metric Learning for Face Recognition", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10899.pdf"}, {"id": "abdd17e411a7bfe043f280abd4e560a04ab6e992", "title": "Pose-Robust Face Recognition via Deep Residual Equivariant Mapping", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00839.pdf"}, {"id": "3cf1f89d73ca4b25399c237ed3e664a55cd273a2", "title": 
"Face Sketch Matching via Coupled Deep Transform Learning", "addresses": [{"address": "IIIT Delhi, India", "lat": "28.54562820", "lng": "77.27315050", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.02914.pdf"}, {"id": "f27fd2a1bc229c773238f1912db94991b8bf389a", "title": "How do you develop a face detector for the unconstrained environment?", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IVCNZ.2016.7804414"}, {"id": "9865fe20df8fe11717d92b5ea63469f59cf1635a", "title": "Wildest Faces: Face Detection and Recognition in Violent Settings", "addresses": [{"address": "Hacettepe University", "lat": "39.86742125", "lng": "32.73519072", "type": "edu"}, {"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07566.pdf"}, {"id": "c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d", "title": "Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf"}, {"id": "f5eb411217f729ad7ae84bfd4aeb3dedb850206a", "title": "Tackling Low Resolution for Better Scene Understanding", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf"}, {"id": "96e731e82b817c95d4ce48b9e6b08d2394937cf8", "title": "Unconstrained face verification using deep CNN features", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1508.01722v2.pdf"}, {"id": "870433ba89d8cab1656e57ac78f1c26f4998edfb", "title": "Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163"}, {"id": "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "title": "Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions", "addresses": [{"address": "University of Copenhagen", "lat": "55.68015020", "lng": "12.57232700", "type": "edu"}, {"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf"}, {"id": "6341274aca0c2977c3e1575378f4f2126aa9b050", "title": "A multi-scale cascade fully convolutional network face detector", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1609.03536v1.pdf"}, {"id": "17479e015a2dcf15d40190e06419a135b66da4e0", "title": "Predicting First Impressions With Deep Learning", "addresses": [{"address": "University of Notre Dame", "lat": 
"41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1610.08119.pdf"}, {"id": "a0b1990dd2b4cd87e4fd60912cc1552c34792770", "title": "Deep Constrained Local Models for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf"}, {"id": "772474b5b0c90629f4d9c223fd9c1ef45e1b1e66", "title": "Multi-dim: A multi-dimensional face database towards the application of 3D technology in real-world scenarios", "addresses": [{"address": "Sichuan University, Chengdu", "lat": "30.64276900", "lng": "104.06751175", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272716"}, {"id": "4b3f425274b0c2297d136f8833a31866db2f2aec", "title": "Toward Open-Set Face Recognition", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.85"}, {"id": "17ded725602b4329b1c494bfa41527482bf83a6f", "title": "Compact Convolutional Neural Network Cascade for Face Detection", "addresses": [{"address": "Tomsk Polytechnic University", "lat": "56.46255985", "lng": "84.95565495", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf"}, {"id": "14b016c7a87d142f4b9a0e6dc470dcfc073af517", "title": "Modest proposals for improving biometric recognition papers", "addresses": [{"address": "San Jose State University", "lat": "37.33519080", "lng": "-121.88126008", "type": "edu"}], "year": 2015, "pdf": "http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912"}, {"id": "93420d9212dd15b3ef37f566e4d57e76bb2fab2f", "title": "An All-In-One Convolutional Neural Network for Face Analysis", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.00851.pdf"}, {"id": "def2983576001bac7d6461d78451159800938112", "title": "The Do\u2019s and Don\u2019ts for CNN-Based Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07426.pdf"}, {"id": "4b605e6a9362485bfe69950432fa1f896e7d19bf", "title": "A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf"}, {"id": "8d3e95c31c93548b8c71dbeee2e9f7180067a888", "title": "Template regularized sparse coding for face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "GE Global Research", "lat": "42.82715560", "lng": "-73.87804810", "type": "company"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899841"}, {"id": "337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958", "title": "Data-specific Adaptive Threshold for Face Recognition and Authentication", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11160.pdf"}, {"id": 
"0aeb5020003e0c89219031b51bd30ff1bceea363", "title": "Sparsifying Neural Network Connections for Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525"}, {"id": "99daa2839213f904e279aec7cef26c1dfb768c43", "title": "DocFace: Matching ID Document Photos to Selfies", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.02283.pdf"}, {"id": "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "title": "Effective 3D based frontalization for unconstrained face recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774"}, {"id": "c43ed9b34cad1a3976bac7979808eb038d88af84", "title": "Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03675.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "61262450d4d814865a4f9a84299c24daa493f66e", "title": "Biometric recognition in surveillance scenarios: a survey", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": "-7.50958010", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s10462-016-9474-x"}, {"id": "626913b8fcbbaee8932997d6c4a78fe1ce646127", "title": "Learning from Millions of 3D Scans for Large-scale 3D Face Recognition", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4", "title": "Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning", "addresses": [{"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2788002"}, {"id": "b6f758be954d34817d4ebaa22b30c63a4b8ddb35", "title": "A Proximity-Aware Hierarchical Clustering of Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1703.04835"}, {"id": "0a34fe39e9938ae8c813a81ae6d2d3a325600e5c", "title": "FacePoseNet: Making a Case for Landmark-Free Face Alignment", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.07517.pdf"}, {"id": "84ae55603bffda40c225fe93029d39f04793e01f", "title": "ICB-RW 2016: International 
challenge on biometric recognition in the wild", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": "-7.50958010", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550066"}, {"id": "73ea06787925157df519a15ee01cc3dc1982a7e0", "title": "Fast Face Image Synthesis with Minimal Training", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.01474.pdf"}, {"id": "c6382de52636705be5898017f2f8ed7c70d7ae96", "title": "Unconstrained face detection: State of the art baseline and challenges", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "Noblis, Falls Church, VA, U.S.A.", "lat": "38.95187000", "lng": "-77.36325900", "type": "company"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089"}, {"id": "eee06d68497be8bf3a8aba4fde42a13aa090b301", "title": "CR-GAN: Learning Complete Representations for Multi-view Generation", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of North Carolina at Charlotte", "lat": "35.31034410", "lng": "-80.73261617", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.11191.pdf"}, {"id": "a3201e955d6607d383332f3a12a7befa08c5a18c", "title": "VLAD encoded Deep Convolutional features for unconstrained face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276"}, {"id": "52d7eb0fbc3522434c13cc247549f74bb9609c5d", "title": "WIDER FACE: A Face Detection Benchmark", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1511.06523.pdf"}, {"id": "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf"}, {"id": "746c0205fdf191a737df7af000eaec9409ede73f", "title": "Investigating Nuisances in DCNN-Based Face Recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119"}, {"id": "0081e2188c8f34fcea3e23c49fb3e17883b33551", "title": "Training Deep Face Recognition Systems with Synthetic Data", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf"}, {"id": "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "title": "Ring loss: Convex Feature Normalization for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00130.pdf"}, {"id": "5f771fed91c8e4b666489ba2384d0705bcf75030", "title": "Understanding Humans in Crowded Scenes: Deep Nested 
Adversarial Learning and A New Benchmark for Multi-Human Parsing", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03287.pdf"}, {"id": "2241eda10b76efd84f3c05bdd836619b4a3df97e", "title": "One-to-many face recognition with bilinear CNNs", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1506.01342v5.pdf"}, {"id": "58d76380d194248b3bb291b8c7c5137a0a376897", "title": "FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf"}, {"id": "7fb5006b6522436ece5bedf509e79bdb7b79c9a7", "title": "Multi-Task Convolutional Neural Network for Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf"}, {"id": "28646c6220848db46c6944967298d89a6559c700", "title": "It takes two to tango : Cascading off-the-shelf face detectors", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2864/6c6220848db46c6944967298d89a6559c700.pdf"}, {"id": "5812d8239d691e99d4108396f8c26ec0619767a6", "title": "GhostVLAD for set-based face recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.09951.pdf"}, {"id": "e4c3587392d477b7594086c6f28a00a826abf004", "title": "Face recognition by facial attribute assisted network", "addresses": [{"address": "National Taiwan University of Science and Technology", "lat": "25.01353105", "lng": "121.54173736", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296998"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "ce6d60b69eb95477596535227958109e07c61e1e", "title": "Unconstrained face verification using fisher vectors computed from frontalized faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf"}, {"id": "38d8ff137ff753f04689e6b76119a44588e143f3", "title": "When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2017, 
"pdf": "http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf"}, {"id": "9627f28ea5f4c389350572b15968386d7ce3fe49", "title": "Load Balanced GANs for Multi-view Face Image Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.07447.pdf"}, {"id": "4e7ed13e541b8ed868480375785005d33530e06d", "title": "Face recognition using deep multi-pose representations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477555"}, {"id": "582edc19f2b1ab2ac6883426f147196c8306685a", "title": "Do We Really Need to Collect Millions of Faces for Effective Face Recognition?", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf"}, {"id": "87e6cb090aecfc6f03a3b00650a5c5f475dfebe1", "title": "Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf"}, {"id": "3b9b200e76a35178da940279d566bbb7dfebb787", "title": "Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf"}, {"id": "de79437f74e8e3b266afc664decf4e6e4bdf34d7", "title": "To face or not to face: Towards reducing false positive of face detection", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IVCNZ.2016.7804415"}, {"id": "368d59cf1733af511ed8abbcbeb4fb47afd4da1c", "title": "To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf"}, {"id": "62e913431bcef5983955e9ca160b91bb19d9de42", "title": "Facial Landmark Detection with Tweaked Convolutional Neural Networks", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf"}, {"id": "acd4280453b995cb071c33f7c9db5760432f4279", "title": "Deep transformation learning for face recognition in the unconstrained scene", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1007/s00138-018-0907-1"}, {"id": "ceeb67bf53ffab1395c36f1141b516f893bada27", "title": "Face Alignment by Local Deep Descriptor 
Regression", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf"}, {"id": "37619564574856c6184005830deda4310d3ca580", "title": "A deep pyramid Deformable Part Model for face detection", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358755"}, {"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf"}, {"id": "3c97c32ff575989ef2869f86d89c63005fc11ba9", "title": "Face Detection with the Faster R-CNN", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": 2017, "pdf": "http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf"}, {"id": "4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e", "title": "Deep Density Clustering of Unconstrained Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf"}, {"id": "fca9ebaa30d69ccec8bb577c31693c936c869e72", "title": "Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.00338.pdf"}]}
\ No newline at end of file +{"id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "paper": {"paper_id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "key": "ijb_c", "title": "IARPA Janus Benchmark-B Face Dataset", "year": 2017, "pdf": "http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf", "address": "", "name": "IJB-B"}, "address": null, "additional_papers": [{"paper_id": "140c95e53c619eac594d70f6369f518adfea12ef", "key": "ijb_c", "title": "Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf", "address": "", "name": "IJB-B"}, {"paper_id": "57178b36c21fd7f4529ac6748614bb3374714e91", "key": "ijb_c", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217", "address": "", "name": "IJB-B"}], "citations": [{"id": "4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e", "title": "Deep Density Clustering of Unconstrained Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf"}, {"id": "5812d8239d691e99d4108396f8c26ec0619767a6", "title": "GhostVLAD for set-based face recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.09951.pdf"}, {"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf"}, {"id": "0a34fe39e9938ae8c813a81ae6d2d3a325600e5c", "title": "FacePoseNet: Making a Case for Landmark-Free Face Alignment", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.07517.pdf"}, {"id": "73ea06787925157df519a15ee01cc3dc1982a7e0", "title": "Fast Face Image Synthesis with Minimal Training", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.01474.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "872dfdeccf99bbbed7c8f1ea08afb2d713ebe085", "title": "L2-constrained Softmax Loss for Discriminative Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.09507.pdf"}, {"id": "146a7ecc7e34b85276dd0275c337eff6ba6ef8c0", "title": "AFFACT: Alignment-free facial attribute classification technique", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1611.06158v1.pdf"}, {"id": "313d5eba97fe064bdc1f00b7587a4b3543ef712a", "title": "Compact Deep Aggregation for Set Retrieval", "addresses": [{"address": "University of 
Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cb7f/93467b0ec1afd43d995e511f5d7bf052a5af.pdf"}, {"id": "5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b", "title": "Regularized metric adaptation for unconstrained face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900278"}, {"id": "48a9241edda07252c1aadca09875fabcfee32871", "title": "Convolutional Experts Constrained Local Model for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1611.08657v5.pdf"}, {"id": "86204fc037936754813b91898377e8831396551a", "title": "Dense Face Alignment", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01442.pdf"}, {"id": "b2cb335ded99b10f37002d09753bd5a6ea522ef1", "title": "Analysis of adaptability of deep features for verifying blurred and cross-resolution images", "addresses": [{"address": "Institute of Engineering and Management, Kolkata, India", "lat": "22.57423855", "lng": "88.43373030", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ISBA.2017.7947679"}, {"id": "486840f4f524e97f692a7f6b42cd19019ee71533", "title": "DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills", "addresses": [{"address": "\u00c9cole Centrale de Lyon", "lat": "45.78359660", "lng": "4.76789480", "type": "edu"}, {"address": "Safran Identity and Security", "lat": "48.83249300", "lng": "2.26747400", "type": "company"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1703.08388v2.pdf"}, {"id": "2d748f8ee023a5b1fbd50294d176981ded4ad4ee", "title": "Triplet Similarity Embedding for Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf"}, {"id": "f7824758800a7b1a386db5bd35f84c81454d017a", "title": "KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1702.05085.pdf"}, {"id": "02467703b6e087799e04e321bea3a4c354c5487d", "title": "Grouper: Optimizing Crowdsourced Face Annotations", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27"}, {"id": "377f2b65e6a9300448bdccf678cde59449ecd337", "title": "Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10275.pdf"}, {"id": "cd55fb30737625e86454a2861302b96833ed549d", "title": "Annotating Unconstrained Face Imagery: A scalable approach", "addresses": [{"address": 
"Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "Noblis, Falls Church, VA, U.S.A.", "lat": "38.95187000", "lng": "-77.36325900", "type": "company"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094"}, {"id": "5226296884b3e151ce317a37f94827dbda0b9d16", "title": "Deep pair-wise similarity learning for face recognition", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IWBF.2016.7449690"}, {"id": "80be8624771104ff4838dcba9629bacfe6b3ea09", "title": "Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2014, "pdf": "http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf"}, {"id": "50b58becaf67e92a6d9633e0eea7d352157377c3", "title": "Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/50b5/8becaf67e92a6d9633e0eea7d352157377c3.pdf"}, {"id": "ac2881bdf7b57dc1672a17b221d68a438d79fce8", "title": "Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08472.pdf"}, {"id": "72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e", "title": "Face Recognition with Contrastive Convolution", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf"}, {"id": "368e99f669ea5fd395b3193cd75b301a76150f9d", "title": "One-to-many face recognition with bilinear CNNs", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1506.01342.pdf"}, {"id": "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "title": "Pooling Faces: Template Based Face Recognition with Pooled Face Images", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1607.01450v1.pdf"}, {"id": "052f994898c79529955917f3dfc5181586282cf8", "title": "Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02191.pdf"}, {"id": "4e8168fbaa615009d1618a9d6552bfad809309e9", "title": "Deep Convolutional Neural Network Features and the Original Image", "addresses": [{"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}, 
{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf"}, {"id": "3cb2841302af1fb9656f144abc79d4f3d0b27380", "title": "When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf"}, {"id": "0c1d85a197a1f5b7376652a485523e616a406273", "title": "Joint Registration and Representation Learning for Unconstrained Face Identification", "addresses": [{"address": "Khalifa University", "lat": "24.44690250", "lng": "54.39425630", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169"}, {"id": "c75e6ce54caf17b2780b4b53f8d29086b391e839", "title": "ExpNet: Landmark-Free, Deep, 3D Facial Expressions", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.00542.pdf"}, {"id": "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "title": "Towards Interpretable Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00611.pdf"}, {"id": "30180f66d5b4b7c0367e4b43e2b55367b72d6d2a", "title": "Template Adaptation for Face Verification and Identification", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2017, "pdf": "http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf"}, {"id": "8334da483f1986aea87b62028672836cb3dc6205", "title": "Fully Associative Patch-Based 1-to-N Matcher for Face Recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06306.pdf"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "6fbb179a4ad39790f4558dd32316b9f2818cd106", "title": "Input Aggregated Network for Face Video Representation", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf"}, {"id": "d4f1eb008eb80595bcfdac368e23ae9754e1e745", "title": "Unconstrained Face Detection and Open-Set Face Recognition Challenge", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02337.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", 
"lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "d28d32af7ef9889ef9cb877345a90ea85e70f7f1", "title": "Local-Global Landmark Confidences for Face Recognition", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.84"}, {"id": "a29566375836f37173ccaffa47dea25eb1240187", "title": "Vehicle Re-Identification in Context", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.09409.pdf"}, {"id": "29f298dd5f806c99951cb434834bc8dcc765df18", "title": "Computationally efficient template-based face recognition", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899837"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c", "title": "SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks", "addresses": [{"address": "Philipps-Universit\u00e4t Marburg", "lat": "50.81427010", "lng": "8.77143500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICT.2017.7998256"}, {"id": "a2b4a6c6b32900a066d0257ae6d4526db872afe2", "title": "Learning Face Image Quality From Human Assessments", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "d29eec5e047560627c16803029d2eb8a4e61da75", "title": "Feature Transfer Learning for Deep Face Recognition with Long-Tail Data", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf"}, {"id": "5180df9d5eb26283fb737f491623395304d57497", "title": "Scalable Angular Discriminative Deep Metric Learning for Face Recognition", "addresses": [{"address": "Tianjin University", "lat": 
"36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10899.pdf"}, {"id": "abdd17e411a7bfe043f280abd4e560a04ab6e992", "title": "Pose-Robust Face Recognition via Deep Residual Equivariant Mapping", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00839.pdf"}, {"id": "3cf1f89d73ca4b25399c237ed3e664a55cd273a2", "title": "Face Sketch Matching via Coupled Deep Transform Learning", "addresses": [{"address": "IIIT Delhi, India", "lat": "28.54562820", "lng": "77.27315050", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.02914.pdf"}, {"id": "f27fd2a1bc229c773238f1912db94991b8bf389a", "title": "How do you develop a face detector for the unconstrained environment?", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IVCNZ.2016.7804414"}, {"id": "9865fe20df8fe11717d92b5ea63469f59cf1635a", "title": "Wildest Faces: Face Detection and Recognition in Violent Settings", "addresses": [{"address": "Hacettepe University", "lat": "39.86742125", "lng": "32.73519072", "type": "edu"}, {"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07566.pdf"}, {"id": "c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d", "title": "Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf"}, {"id": "f5eb411217f729ad7ae84bfd4aeb3dedb850206a", "title": "Tackling Low Resolution for Better Scene Understanding", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf"}, {"id": "96e731e82b817c95d4ce48b9e6b08d2394937cf8", "title": "Unconstrained face verification using deep CNN features", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1508.01722v2.pdf"}, {"id": "870433ba89d8cab1656e57ac78f1c26f4998edfb", "title": "Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163"}, {"id": "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "title": "Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions", "addresses": [{"address": "University of Copenhagen", "lat": "55.68015020", "lng": "12.57232700", "type": "edu"}, {"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf"}, 
{"id": "6341274aca0c2977c3e1575378f4f2126aa9b050", "title": "A multi-scale cascade fully convolutional network face detector", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1609.03536v1.pdf"}, {"id": "17479e015a2dcf15d40190e06419a135b66da4e0", "title": "Predicting First Impressions With Deep Learning", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1610.08119.pdf"}, {"id": "a0b1990dd2b4cd87e4fd60912cc1552c34792770", "title": "Deep Constrained Local Models for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf"}, {"id": "772474b5b0c90629f4d9c223fd9c1ef45e1b1e66", "title": "Multi-dim: A multi-dimensional face database towards the application of 3D technology in real-world scenarios", "addresses": [{"address": "Sichuan University, Chengdu", "lat": "30.64276900", "lng": "104.06751175", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272716"}, {"id": "4b3f425274b0c2297d136f8833a31866db2f2aec", "title": "Toward Open-Set Face Recognition", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.85"}, {"id": "17ded725602b4329b1c494bfa41527482bf83a6f", "title": "Compact Convolutional Neural Network Cascade for Face Detection", "addresses": [{"address": "Tomsk Polytechnic University", "lat": "56.46255985", "lng": "84.95565495", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf"}, {"id": "14b016c7a87d142f4b9a0e6dc470dcfc073af517", "title": "Modest proposals for improving biometric recognition papers", "addresses": [{"address": "San Jose State University", "lat": "37.33519080", "lng": "-121.88126008", "type": "edu"}], "year": 2015, "pdf": "http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912"}, {"id": "93420d9212dd15b3ef37f566e4d57e76bb2fab2f", "title": "An All-In-One Convolutional Neural Network for Face Analysis", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.00851.pdf"}, {"id": "def2983576001bac7d6461d78451159800938112", "title": "The Do\u2019s and Don\u2019ts for CNN-Based Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07426.pdf"}, {"id": "4b605e6a9362485bfe69950432fa1f896e7d19bf", "title": "A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf"}, {"id": "8d3e95c31c93548b8c71dbeee2e9f7180067a888", "title": "Template regularized sparse coding for face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "GE Global 
Research", "lat": "42.82715560", "lng": "-73.87804810", "type": "company"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899841"}, {"id": "337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958", "title": "Data-specific Adaptive Threshold for Face Recognition and Authentication", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11160.pdf"}, {"id": "0aeb5020003e0c89219031b51bd30ff1bceea363", "title": "Sparsifying Neural Network Connections for Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525"}, {"id": "99daa2839213f904e279aec7cef26c1dfb768c43", "title": "DocFace: Matching ID Document Photos to Selfies", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.02283.pdf"}, {"id": "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "title": "Effective 3D based frontalization for unconstrained face recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774"}, {"id": "c43ed9b34cad1a3976bac7979808eb038d88af84", "title": "Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03675.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "61262450d4d814865a4f9a84299c24daa493f66e", "title": "Biometric recognition in surveillance scenarios: a survey", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": "-7.50958010", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s10462-016-9474-x"}, {"id": "626913b8fcbbaee8932997d6c4a78fe1ce646127", "title": "Learning from Millions of 3D Scans for Large-scale 3D Face Recognition", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4", "title": "Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning", "addresses": [{"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2788002"}, {"id": "b6f758be954d34817d4ebaa22b30c63a4b8ddb35", "title": "A Proximity-Aware Hierarchical Clustering of Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": 
"-76.62196103", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1703.04835"}, {"id": "84ae55603bffda40c225fe93029d39f04793e01f", "title": "ICB-RW 2016: International challenge on biometric recognition in the wild", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": "-7.50958010", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550066"}, {"id": "c6382de52636705be5898017f2f8ed7c70d7ae96", "title": "Unconstrained face detection: State of the art baseline and challenges", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "Noblis, Falls Church, VA, U.S.A.", "lat": "38.95187000", "lng": "-77.36325900", "type": "company"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089"}, {"id": "eee06d68497be8bf3a8aba4fde42a13aa090b301", "title": "CR-GAN: Learning Complete Representations for Multi-view Generation", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of North Carolina at Charlotte", "lat": "35.31034410", "lng": "-80.73261617", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.11191.pdf"}, {"id": "a3201e955d6607d383332f3a12a7befa08c5a18c", "title": "VLAD encoded Deep Convolutional features for unconstrained face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276"}, {"id": "52d7eb0fbc3522434c13cc247549f74bb9609c5d", "title": "WIDER FACE: A Face Detection Benchmark", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1511.06523.pdf"}, {"id": "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf"}, {"id": "746c0205fdf191a737df7af000eaec9409ede73f", "title": "Investigating Nuisances in DCNN-Based Face Recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119"}, {"id": "0081e2188c8f34fcea3e23c49fb3e17883b33551", "title": "Training Deep Face Recognition Systems with Synthetic Data", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf"}, {"id": "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "title": "Ring loss: Convex Feature Normalization for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00130.pdf"}, {"id": "5f771fed91c8e4b666489ba2384d0705bcf75030", "title": "Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing", "addresses": [{"address": "National University of 
Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03287.pdf"}, {"id": "2241eda10b76efd84f3c05bdd836619b4a3df97e", "title": "One-to-many face recognition with bilinear CNNs", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1506.01342v5.pdf"}, {"id": "58d76380d194248b3bb291b8c7c5137a0a376897", "title": "FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf"}, {"id": "7fb5006b6522436ece5bedf509e79bdb7b79c9a7", "title": "Multi-Task Convolutional Neural Network for Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf"}, {"id": "28646c6220848db46c6944967298d89a6559c700", "title": "It takes two to tango : Cascading off-the-shelf face detectors", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2864/6c6220848db46c6944967298d89a6559c700.pdf"}, {"id": "e4c3587392d477b7594086c6f28a00a826abf004", "title": "Face recognition by facial attribute assisted network", "addresses": [{"address": "National Taiwan University of Science and Technology", "lat": "25.01353105", "lng": "121.54173736", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296998"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "ce6d60b69eb95477596535227958109e07c61e1e", "title": "Unconstrained face verification using fisher vectors computed from frontalized faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf"}, {"id": "38d8ff137ff753f04689e6b76119a44588e143f3", "title": "When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf"}, {"id": "9627f28ea5f4c389350572b15968386d7ce3fe49", "title": "Load Balanced GANs for Multi-view Face Image Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": 
"https://arxiv.org/pdf/1802.07447.pdf"}, {"id": "4e7ed13e541b8ed868480375785005d33530e06d", "title": "Face recognition using deep multi-pose representations", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477555"}, {"id": "582edc19f2b1ab2ac6883426f147196c8306685a", "title": "Do We Really Need to Collect Millions of Faces for Effective Face Recognition?", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf"}, {"id": "87e6cb090aecfc6f03a3b00650a5c5f475dfebe1", "title": "Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf"}, {"id": "3b9b200e76a35178da940279d566bbb7dfebb787", "title": "Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf"}, {"id": "de79437f74e8e3b266afc664decf4e6e4bdf34d7", "title": "To face or not to face: Towards reducing false positive of face detection", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IVCNZ.2016.7804415"}, {"id": "368d59cf1733af511ed8abbcbeb4fb47afd4da1c", "title": "To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf"}, {"id": "62e913431bcef5983955e9ca160b91bb19d9de42", "title": "Facial Landmark Detection with Tweaked Convolutional Neural Networks", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf"}, {"id": "acd4280453b995cb071c33f7c9db5760432f4279", "title": "Deep transformation learning for face recognition in the unconstrained scene", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1007/s00138-018-0907-1"}, {"id": "ceeb67bf53ffab1395c36f1141b516f893bada27", "title": "Face Alignment by Local Deep Descriptor Regression", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": 
"http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf"}, {"id": "37619564574856c6184005830deda4310d3ca580", "title": "A deep pyramid Deformable Part Model for face detection", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358755"}, {"id": "3c97c32ff575989ef2869f86d89c63005fc11ba9", "title": "Face Detection with the Faster R-CNN", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": 2017, "pdf": "http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf"}, {"id": "fca9ebaa30d69ccec8bb577c31693c936c869e72", "title": "Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.00338.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/final/images_of_groups.csv b/site/datasets/final/images_of_groups.csv new file mode 100644 index 00000000..856d97b1 --- /dev/null +++ b/site/datasets/final/images_of_groups.csv @@ -0,0 +1,103 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,Images of Groups,images_of_groups,0.0,0.0,,,21d9d0deed16f0ad62a4865e9acf0686f4f15492,main,http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf,Understanding images of groups of people,2009 +1,Images of Groups,images_of_groups,45.42580475,-75.68740118,University of Ottawa,edu,49e2c1bae80e6b75233348102dc44671ee52b548,citation,http://www.site.uottawa.ca/~laganier/publications/esmaeelICIP2014.pdf,Age and gender recognition using informative features of various types,2014 +2,Images of Groups,images_of_groups,41.70456775,-86.23822026,University of Notre Dame,edu,0235b2d2ae306b7755483ac4f564044f46387648,citation,http://pdfs.semanticscholar.org/0235/b2d2ae306b7755483ac4f564044f46387648.pdf,Recognition of Facial Attributes Using Adaptive Sparse Representations of Random Patches,2014 +3,Images of Groups,images_of_groups,37.43131385,-122.16936535,Stanford University,edu,27a299b834a18e45d73e0bf784bbb5b304c197b3,citation,http://ai.stanford.edu/~vigneshr/cvpr_13/cvpr13_social_roles.pdf,Social Role Discovery in Human Events,2013 +4,Images of Groups,images_of_groups,37.43131385,-122.16936535,Stanford University,edu,d84230a2fc9950fccfd37f0291d65e634b5ffc32,citation,http://pdfs.semanticscholar.org/d842/30a2fc9950fccfd37f0291d65e634b5ffc32.pdf,Historical and Modern Image-to-Image Translation with Generative Adversarial Networks,2017 +5,Images of Groups,images_of_groups,25.01682835,121.53846924,National Taiwan University,edu,046865a5f822346c77e2865668ec014ec3282033,citation,http://www.csie.ntu.edu.tw/~winston/papers/chen12discovering.pdf,Discovering informative social subgraphs and predicting pairwise relationships from group photos,2012 +6,Images of Groups,images_of_groups,28.59899755,-81.19712501,University of Central Florida,edu,0aa303109a3402aa5a203877847d549c4a24d933,citation,http://crcv-web.eecs.ucf.edu/papers/cvpr2014/Resemblance_CVPR14.pdf,Who Do I Look Like? 
Determining Parent-Offspring Resemblance via Gated Autoencoders,2014 +7,Images of Groups,images_of_groups,37.4102193,-122.05965487,Carnegie Mellon University,edu,c6096986b4d6c374ab2d20031e026b581e7bf7e9,citation,http://pdfs.semanticscholar.org/c609/6986b4d6c374ab2d20031e026b581e7bf7e9.pdf,A Framework for Using Context to Understand Images of People,2009 +8,Images of Groups,images_of_groups,51.5231607,-0.1282037,University College London,edu,6aaa77e241fe55ae0c4ad281e27886ea778f9e23,citation,http://pdfs.semanticscholar.org/b562/ad2ae12920cb318c5309a35000b4d5eb27b8.pdf,F-Formation Detection: Individuating Free-Standing Conversational Groups in Images,2015 +9,Images of Groups,images_of_groups,43.7743911,-79.50481085,York University,edu,ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,citation,https://arxiv.org/pdf/1706.04277.pdf,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,2017 +10,Images of Groups,images_of_groups,27.18794105,31.17009498,Assiut University,edu,ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,citation,https://arxiv.org/pdf/1706.04277.pdf,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,2017 +11,Images of Groups,images_of_groups,40.9153196,-73.1270626,Stony Brook University,edu,14e9158daf17985ccbb15c9cd31cf457e5551990,citation,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,ConvNets with Smooth Adaptive Activation Functions for Regression,2017 +12,Images of Groups,images_of_groups,40.90826665,-73.11520891,Stony Brook University Hospital,edu,14e9158daf17985ccbb15c9cd31cf457e5551990,citation,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,ConvNets with Smooth Adaptive Activation Functions for Regression,2017 +13,Images of Groups,images_of_groups,-22.8148374,-47.0647708,University of Campinas (UNICAMP),edu,b161d261fabb507803a9e5834571d56a3b87d147,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913,Gender recognition from face images using a geometric descriptor,2017 +14,Images of Groups,images_of_groups,40.9153196,-73.1270626,Stony Brook University,edu,1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc,citation,https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf,Squared Earth Mover ’ s Distance Loss for Training Deep Neural Networks on Ordered-Classes,2017 +15,Images of Groups,images_of_groups,24.94314825,121.36862979,National Taipei University,edu,30cc1ddd7a9b4878cca7783a59086bdc49dc4044,citation,https://doi.org/10.1007/s11042-015-2599-0,Intensity contrast masks for gender classification,2015 +16,Images of Groups,images_of_groups,-35.2776999,149.118527,Australian National University,edu,49e541e0bbc7a082e5c952fc70716e66e5713080,citation,http://ieeexplore.ieee.org/document/6460925/,Group expression intensity estimation in videos via Gaussian Processes,2012 +17,Images of Groups,images_of_groups,50.3755269,-4.13937687,Plymouth University,edu,8bed7ff2f75d956652320270eaf331e1f73efb35,citation,https://arxiv.org/pdf/1709.03820.pdf,Emotion recognition in the wild using deep neural networks and Bayesian classifiers,2017 +18,Images of Groups,images_of_groups,39.3650216,16.2257177,University of Calabria,edu,8bed7ff2f75d956652320270eaf331e1f73efb35,citation,https://arxiv.org/pdf/1709.03820.pdf,Emotion recognition in the wild using deep neural networks and Bayesian classifiers,2017 +19,Images of Groups,images_of_groups,51.7534538,-1.25400997,University of 
Oxford,edu,0be8b12f194fb604be69c139a195799e8ab53fd3,citation,http://www.robots.ox.ac.uk/~vgg/publications/2014/Hoai14/poster.pdf,Talking Heads: Detecting Humans and Recognizing Their Interactions,2014 +20,Images of Groups,images_of_groups,-35.2776999,149.118527,Australian National University,edu,0d3068b352c3733c9e1cc75e449bf7df1f7b10a4,citation,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.111,Context Based Facial Expression Analysis in the Wild,2013 +21,Images of Groups,images_of_groups,45.42580475,-75.68740118,University of Ottawa,edu,16820ccfb626dcdc893cc7735784aed9f63cbb70,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf,Real-time embedded age and gender classification in unconstrained video,2015 +22,Images of Groups,images_of_groups,51.5247272,-0.03931035,Queen Mary University of London,edu,fcc82154067dfe778423c2df4ed69f0bec6e1534,citation,https://pdfs.semanticscholar.org/fcc8/2154067dfe778423c2df4ed69f0bec6e1534.pdf,Automatic Analysis of Affect and Membership in Group Settings,2017 +23,Images of Groups,images_of_groups,52.17638955,0.14308882,University of Cambridge,edu,fcc82154067dfe778423c2df4ed69f0bec6e1534,citation,https://pdfs.semanticscholar.org/fcc8/2154067dfe778423c2df4ed69f0bec6e1534.pdf,Automatic Analysis of Affect and Membership in Group Settings,2017 +24,Images of Groups,images_of_groups,30.284151,-97.73195598,University of Texas at Austin,edu,45513d0f2f5c0dac5b61f9ff76c7e46cce62f402,citation,http://pdfs.semanticscholar.org/4551/3d0f2f5c0dac5b61f9ff76c7e46cce62f402.pdf,Face Discovery with Social Context,2011 +25,Images of Groups,images_of_groups,37.26728,126.9841151,Seoul National University,edu,282503fa0285240ef42b5b4c74ae0590fe169211,citation,http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf,Feeding Hand-Crafted Features for Enhancing the Performance of Convolutional Neural Networks,2018 +26,Images of Groups,images_of_groups,-35.2776999,149.118527,Australian National University,edu,1ab881ec87167af9071b2ad8ff6d4ce3eee38477,citation,http://pdfs.semanticscholar.org/1ab8/81ec87167af9071b2ad8ff6d4ce3eee38477.pdf,Finding Happiest Moments in a Social Context,2012 +27,Images of Groups,images_of_groups,-35.23656905,149.08446994,University of Canberra,edu,1ab881ec87167af9071b2ad8ff6d4ce3eee38477,citation,http://pdfs.semanticscholar.org/1ab8/81ec87167af9071b2ad8ff6d4ce3eee38477.pdf,Finding Happiest Moments in a Social Context,2012 +28,Images of Groups,images_of_groups,-35.23656905,149.08446994,University of Canberra,edu,572dbaee6648eefa4c9de9b42551204b985ff863,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151,The more the merrier: Analysing the affect of a group of people in images,2015 +29,Images of Groups,images_of_groups,32.87935255,-117.23110049,"University of California, San Diego",edu,572dbaee6648eefa4c9de9b42551204b985ff863,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151,The more the merrier: Analysing the affect of a group of people in images,2015 +30,Images of Groups,images_of_groups,46.0658836,11.1159894,University of Trento,edu,572dbaee6648eefa4c9de9b42551204b985ff863,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151,The more the merrier: Analysing the affect of a group of people in images,2015 +31,Images of Groups,images_of_groups,35.90503535,-79.04775327,University of North 
Carolina,edu,dbf6d2619bd41ce4c36488e15d114a2da31b51c9,citation,https://arxiv.org/pdf/1810.00028.pdf,Data-Driven Modeling of Group Entitativity in Virtual Environments,2018 +32,Images of Groups,images_of_groups,39.2899685,-76.62196103,University of Maryland,edu,dbf6d2619bd41ce4c36488e15d114a2da31b51c9,citation,https://arxiv.org/pdf/1810.00028.pdf,Data-Driven Modeling of Group Entitativity in Virtual Environments,2018 +33,Images of Groups,images_of_groups,37.4102193,-122.05965487,Carnegie Mellon University,edu,b593f13f974cf444a5781bbd487e1c69e056a1f7,citation,https://pdfs.semanticscholar.org/b593/f13f974cf444a5781bbd487e1c69e056a1f7.pdf,Query Image Query Image Retrievals Retrievals Transferred Poses Transferred Poses,2018 +34,Images of Groups,images_of_groups,43.7776426,11.259765,University of Florence,edu,02cc96ad997102b7c55e177ac876db3b91b4e72c,citation,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_museum-visitors-dataset.pdf,"MuseumVisitors: A dataset for pedestrian and group detection, gaze estimation and behavior understanding",2015 +35,Images of Groups,images_of_groups,40.8419836,-73.94368971,Columbia University,edu,02cc96ad997102b7c55e177ac876db3b91b4e72c,citation,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_museum-visitors-dataset.pdf,"MuseumVisitors: A dataset for pedestrian and group detection, gaze estimation and behavior understanding",2015 +36,Images of Groups,images_of_groups,58.38131405,26.72078081,University of Tartu,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +37,Images of Groups,images_of_groups,41.3868913,2.16352385,University of Barcelona,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +38,Images of Groups,images_of_groups,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +39,Images of Groups,images_of_groups,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +40,Images of Groups,images_of_groups,39.2899685,-76.62196103,University of Maryland,edu,3b092733f428b12f1f920638f868ed1e8663fe57,citation,http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf,On the size of Convolutional Neural Networks and generalization performance,2016 +41,Images of Groups,images_of_groups,33.6431901,-117.84016494,"University of California, Irvine",edu,3991223b1dc3b87883cec7af97cf56534178f74a,citation,http://doi.acm.org/10.1145/2461466.2461469,A unified framework for context assisted face clustering,2013 +42,Images of Groups,images_of_groups,65.0592157,25.46632601,University of Oulu,edu,1e516273554d87bbe1902fa0298179c493299035,citation,http://www.ee.oulu.fi/~hadid/Age-ICPR2012.pdf,Age Classification in Unconstrained Conditions Using LBP Variants,2012 +43,Images of 
Groups,images_of_groups,50.89273635,-1.39464295,University of Southampton,edu,fd67d0efbd94c9d8f9d2f0a972edd7320bc7604f,citation,http://pdfs.semanticscholar.org/fd67/d0efbd94c9d8f9d2f0a972edd7320bc7604f.pdf,Real-Time Semantic Clothing Segmentation,2012 +44,Images of Groups,images_of_groups,47.6543238,-122.30800894,University of Washington,edu,f2c30594d917ea915028668bc2a481371a72a14d,citation,http://pdfs.semanticscholar.org/f2c3/0594d917ea915028668bc2a481371a72a14d.pdf,Scene Understanding Using Internet Photo Collections,2010 +45,Images of Groups,images_of_groups,40.47913175,-74.43168868,Rutgers University,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +46,Images of Groups,images_of_groups,39.2899685,-76.62196103,University of Maryland,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +47,Images of Groups,images_of_groups,35.93006535,-84.31240032,Oak Ridge National Laboratory,edu,2cf3564d7421b661e84251d280d159d4b3ebb336,citation,https://doi.org/10.1109/BTAS.2014.6996287,Discriminating projections for estimating face age in wild images,2014 +48,Images of Groups,images_of_groups,34.2239869,-77.8701325,"UNCW, USA",edu,2cf3564d7421b661e84251d280d159d4b3ebb336,citation,https://doi.org/10.1109/BTAS.2014.6996287,Discriminating projections for estimating face age in wild images,2014 +49,Images of Groups,images_of_groups,34.2249827,-77.86907744,University of North Carolina at Wilmington,edu,2cf3564d7421b661e84251d280d159d4b3ebb336,citation,https://doi.org/10.1109/BTAS.2014.6996287,Discriminating projections for estimating face age in wild images,2014 +50,Images of Groups,images_of_groups,41.3868913,2.16352385,University of Barcelona,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +51,Images of Groups,images_of_groups,45.4312742,12.3265377,University of Venezia,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +52,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,0d57d3d2d04fc96d731cac99a7a8ef79050dac75,citation,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/Workshops/4990a269.pdf,Not Everybody's Special: Using Neighbors in Referring Expressions with Uncertain Attributes,2013 +53,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,fbc9ba70e36768efff130c7d970ce52810b044ff,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500,Face-graph matching for classifying groups of people,2013 +54,Images of Groups,images_of_groups,37.43131385,-122.16936535,Stanford University,edu,fbc9ba70e36768efff130c7d970ce52810b044ff,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500,Face-graph matching for classifying groups of people,2013 +55,Images of Groups,images_of_groups,1.340216,103.965089,Singapore University of Technology and Design,edu,00823e6c0b6f1cf22897b8d0b2596743723ec51c,citation,https://arxiv.org/pdf/1708.07689.pdf,Understanding and Comparing Deep Neural Networks 
for Age and Gender Classification,2017 +56,Images of Groups,images_of_groups,41.1664858,-73.1920564,University of Bridgeport,edu,ac9a331327cceda4e23f9873f387c9fd161fad76,citation,http://pdfs.semanticscholar.org/ac9a/331327cceda4e23f9873f387c9fd161fad76.pdf,Deep Convolutional Neural Network for Age Estimation based on VGG-Face Model,2017 +57,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,5aad56cfa2bac5d6635df4184047e809f8fecca2,citation,http://chenlab.ece.cornell.edu/people/Amir/publications/picture_password.pdf,A visual dictionary attack on Picture Passwords,2013 +58,Images of Groups,images_of_groups,42.9336278,-78.88394479,SUNY Buffalo,edu,4793f11fbca4a7dba898b9fff68f70d868e2497c,citation,http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf,Kinship Verification through Transfer Learning,2011 +59,Images of Groups,images_of_groups,37.4102193,-122.05965487,Carnegie Mellon University,edu,eddc4989cdb20c8cdfb22e989bdb2cb9031d0439,citation,https://arxiv.org/pdf/1804.03080.pdf,Binge Watching: Scaling Affordance Learning from Sitcoms,2017 +60,Images of Groups,images_of_groups,42.3383668,-71.08793524,Northeastern University,edu,090e4713bcccff52dcd0c01169591affd2af7e76,citation,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Shao_What_Do_You_2013_ICCV_paper.pdf,What Do You Do? Occupation Recognition in a Photo via Social Context,2013 +61,Images of Groups,images_of_groups,53.21967825,6.56251482,University of Groningen,edu,4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,citation,https://doi.org/10.1109/SSCI.2015.37,Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition,2015 +62,Images of Groups,images_of_groups,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,291f527598c589fb0519f890f1beb2749082ddfd,citation,http://pdfs.semanticscholar.org/3215/ceb94227451a958bcf6b1205c710d17e53f5.pdf,Seeing People in Social Context: Recognizing People and Social Relationships,2010 +63,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,28d06fd508d6f14cd15f251518b36da17909b79e,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chen_Whats_in_a_2013_CVPR_paper.pdf,What's in a Name? First Names as Facial Attributes,2013 +64,Images of Groups,images_of_groups,37.43131385,-122.16936535,Stanford University,edu,28d06fd508d6f14cd15f251518b36da17909b79e,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chen_Whats_in_a_2013_CVPR_paper.pdf,What's in a Name? 
First Names as Facial Attributes,2013 +65,Images of Groups,images_of_groups,47.0570222,21.922709,Queen Mary University,edu,34022637860443c052375c45c4f700afcb438cd0,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.185,Automatic Recognition of Emotions and Membership in Group Videos,2016 +66,Images of Groups,images_of_groups,52.17638955,0.14308882,University of Cambridge,edu,34022637860443c052375c45c4f700afcb438cd0,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.185,Automatic Recognition of Emotions and Membership in Group Videos,2016 +67,Images of Groups,images_of_groups,38.8964679,-104.8050594,University of Colorado at Colorado Springs,edu,e3e2c106ccbd668fb9fca851498c662add257036,citation,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf,"Appearance, context and co-occurrence ensembles for identity recognition in personal photo collections",2013 +68,Images of Groups,images_of_groups,25.01682835,121.53846924,National Taiwan University,edu,8ba67f45fbb1ce47a90df38f21834db37c840079,citation,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/dsp006-chen.pdf,People search and activity mining in large-scale community-contributed photos,2012 +69,Images of Groups,images_of_groups,1.2962018,103.77689944,National University of Singapore,edu,a5219fff98dfe3ec81dee95c4ead69a8e24cc802,citation,https://arxiv.org/pdf/1708.00634.pdf,Dual-Glance Model for Deciphering Social Relationships,2017 +70,Images of Groups,images_of_groups,44.97308605,-93.23708813,University of Minnesota,edu,a5219fff98dfe3ec81dee95c4ead69a8e24cc802,citation,https://arxiv.org/pdf/1708.00634.pdf,Dual-Glance Model for Deciphering Social Relationships,2017 +71,Images of Groups,images_of_groups,40.742252,-74.0270949,Stevens Institute of Technology,edu,1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf,citation,http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf,A Multi-level Contextual Model for Person Recognition in Photo Albums,2016 +72,Images of Groups,images_of_groups,-34.9189226,138.60423668,University of Adelaide,edu,3d24b386d003bee176a942c26336dbe8f427aadd,citation,http://arxiv.org/abs/1611.09967,Sequential Person Recognition in Photo Albums with a Recurrent Network,2017 +73,Images of Groups,images_of_groups,37.43131385,-122.16936535,Stanford University,edu,111ae23b60284927f2545dfc59b0147bb3423792,citation,https://pdfs.semanticscholar.org/111a/e23b60284927f2545dfc59b0147bb3423792.pdf,Classroom Data Collection and Analysis using Computer Vision,2016 +74,Images of Groups,images_of_groups,51.99882735,4.37396037,Delft University of Technology,edu,dfbf941adeea19f5dff4a70a466ddd1b77f3b727,citation,https://pdfs.semanticscholar.org/dfbf/941adeea19f5dff4a70a466ddd1b77f3b727.pdf,Models for supervised learning in sequence data,2018 +75,Images of Groups,images_of_groups,40.8419836,-73.94368971,Columbia University,edu,774cbb45968607a027ae4729077734db000a1ec5,citation,http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf,From Bikers to Surfers: Visual Recognition of Urban Tribes,2013 +76,Images of Groups,images_of_groups,32.87935255,-117.23110049,"University of California, San Diego",edu,774cbb45968607a027ae4729077734db000a1ec5,citation,http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf,From Bikers to Surfers: Visual Recognition of Urban Tribes,2013 +77,Images of Groups,images_of_groups,47.6543238,-122.30800894,University of 
Washington,edu,5b2bc289b607ca1a0634555158464f28fe68a6d3,citation,http://vision.ics.uci.edu/papers/GargRSS_CVPR_2011/GargRSS_CVPR_2011.pdf,Where's Waldo: Matching people in images of crowds,2011 +78,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,5b2bc289b607ca1a0634555158464f28fe68a6d3,citation,http://vision.ics.uci.edu/papers/GargRSS_CVPR_2011/GargRSS_CVPR_2011.pdf,Where's Waldo: Matching people in images of crowds,2011 +79,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,b185f0a39384ceb3c4923196aeed6d68830a069f,citation,http://pdfs.semanticscholar.org/b185/f0a39384ceb3c4923196aeed6d68830a069f.pdf,Describing Clothing by Semantic Attributes,2012 +80,Images of Groups,images_of_groups,37.43131385,-122.16936535,Stanford University,edu,b185f0a39384ceb3c4923196aeed6d68830a069f,citation,http://pdfs.semanticscholar.org/b185/f0a39384ceb3c4923196aeed6d68830a069f.pdf,Describing Clothing by Semantic Attributes,2012 +81,Images of Groups,images_of_groups,42.4505507,-76.4783513,Cornell University,edu,14c37ea85ba8d74d053a34aedd7e484659fd54d4,citation,http://users.ece.cmu.edu/~dbatra/publications/assets/opd_cvpr10.pdf,Beyond trees: MRF inference via outer-planar decomposition,2010 +82,Images of Groups,images_of_groups,65.0592157,25.46632601,University of Oulu,edu,8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0,citation,http://pdfs.semanticscholar.org/f7c7/f4494f73f2fe845be3b82ee711bc00be7508.pdf,Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis,2015 +83,Images of Groups,images_of_groups,-35.23656905,149.08446994,University of Canberra,edu,8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0,citation,http://pdfs.semanticscholar.org/f7c7/f4494f73f2fe845be3b82ee711bc00be7508.pdf,Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis,2015 +84,Images of Groups,images_of_groups,-35.2776999,149.118527,Australian National University,edu,8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0,citation,http://pdfs.semanticscholar.org/f7c7/f4494f73f2fe845be3b82ee711bc00be7508.pdf,Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis,2015 +85,Images of Groups,images_of_groups,45.42580475,-75.68740118,University of Ottawa,edu,65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,citation,http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf,"Face , Age and Gender Recognition using Local Descriptors",2014 +86,Images of Groups,images_of_groups,32.8536333,-117.2035286,Kyung Hee University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +87,Images of Groups,images_of_groups,24.7246403,46.62335012,King Saud University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +88,Images of Groups,images_of_groups,23.7289899,90.3982682,Institute of Information Technology,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +89,Images of Groups,images_of_groups,42.9336278,-78.88394479,SUNY 
Buffalo,edu,2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d,citation,https://doi.org/10.1109/CVPRW.2011.5981801,Genealogical face recognition based on UB KinFace database,2011 +90,Images of Groups,images_of_groups,35.9042272,-78.85565763,"IBM Research, North Carolina",company,aea50d3414ecb20dc2ba77b0277d0df59bde2c2c,citation,http://pdfs.semanticscholar.org/aea5/0d3414ecb20dc2ba77b0277d0df59bde2c2c.pdf,The #selfiestation: Design and Use of a Kiosk for Taking Selfies in the Enterprise,2015 +91,Images of Groups,images_of_groups,40.00229045,116.32098908,Tsinghua University,edu,0a0d5283439f088c158fcec732e2593bb3cd57ad,citation,http://media.cs.tsinghua.edu.cn/~ahz/papers/whoblockswho_iccv2011_final.pdf,Who Blocks Who: Simultaneous clothing segmentation for grouping images,2011 +92,Images of Groups,images_of_groups,42.36782045,-71.12666653,Harvard University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +93,Images of Groups,images_of_groups,40.9153196,-73.1270626,Stony Brook University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +94,Images of Groups,images_of_groups,51.5217668,-0.13019072,University of London,edu,b9eb55c2c573e2fffd686b00a39185f0142ef816,citation,http://elvera.nue.tu-berlin.de/files/1241Ramzan2010.pdf,The participation payoff: challenges and opportunities for multimedia access in networked communities,2010 +95,Images of Groups,images_of_groups,51.99882735,4.37396037,Delft University of Technology,edu,b9eb55c2c573e2fffd686b00a39185f0142ef816,citation,http://elvera.nue.tu-berlin.de/files/1241Ramzan2010.pdf,The participation payoff: challenges and opportunities for multimedia access in networked communities,2010 +96,Images of Groups,images_of_groups,1.2962018,103.77689944,National University of Singapore,edu,cc3ef62b4a7eb6c4e45302deb89df2e547b6efcc,citation,http://pdfs.semanticscholar.org/cc3e/f62b4a7eb6c4e45302deb89df2e547b6efcc.pdf,Creating Picture Legends for Group Photos,2012 +97,Images of Groups,images_of_groups,37.4585796,-122.17560525,SRI International,edu,683f5c838ea2c9c50f3f5c5fa064c00868751733,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chakraborty_3D_Visual_Proxemics_2013_CVPR_paper.pdf,3D Visual Proxemics: Recognizing Human Interactions in 3D from a Single Image,2013 +98,Images of Groups,images_of_groups,40.51865195,-74.44099801,State University of New Jersey,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +99,Images of Groups,images_of_groups,39.2899685,-76.62196103,University of Maryland,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +100,Images of Groups,images_of_groups,31.2284923,121.40211389,East China Normal University,edu,5364e58ba1f4cdfcffb247c2421e8f56a75fad8d,citation,https://doi.org/10.1109/VCIP.2017.8305113,Facial age estimation through self-paced learning,2017 +101,Images of Groups,images_of_groups,42.3383668,-71.08793524,Northeastern 
University,edu,c9f588d295437009994ddaabb64fd4e4c499b294,citation,http://pdfs.semanticscholar.org/c9f5/88d295437009994ddaabb64fd4e4c499b294.pdf,Predicting Professions through Probabilistic Model under Social Context,2013 diff --git a/site/datasets/final/imdb_wiki.csv b/site/datasets/final/imdb_wiki.csv new file mode 100644 index 00000000..645649ff --- /dev/null +++ b/site/datasets/final/imdb_wiki.csv @@ -0,0 +1,130 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,IMDB,imdb_wiki,0.0,0.0,,,10195a163ab6348eef37213a46f60a3d87f289c5,main,https://doi.org/10.1007/s11263-016-0940-3,Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks,2016 +1,IMDB,imdb_wiki,40.9153196,-73.1270626,Stony Brook University,edu,14e9158daf17985ccbb15c9cd31cf457e5551990,citation,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,ConvNets with Smooth Adaptive Activation Functions for Regression,2017 +2,IMDB,imdb_wiki,40.90826665,-73.11520891,Stony Brook University Hospital,edu,14e9158daf17985ccbb15c9cd31cf457e5551990,citation,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,ConvNets with Smooth Adaptive Activation Functions for Regression,2017 +3,IMDB,imdb_wiki,51.5231607,-0.1282037,University College London,edu,3c4f6d24b55b1fd3c5b85c70308d544faef3f69a,citation,http://pdfs.semanticscholar.org/3c4f/6d24b55b1fd3c5b85c70308d544faef3f69a.pdf,A Hybrid Deep Learning Architecture for Privacy-Preserving Mobile Analytics,2017 +4,IMDB,imdb_wiki,45.5039761,-73.5749687,McGill University,edu,13719bbb4bb8bbe0cbcdad009243a926d93be433,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Tian_Deep_LDA-Pruned_Nets_CVPR_2017_paper.pdf,Deep LDA-Pruned Nets for Efficient Facial Gender Classification,2017 +5,IMDB,imdb_wiki,41.3868913,2.16352385,University of Barcelona,edu,b7845e0b0ce17cde7db37d5524ef2a61dee3e540,citation,https://doi.org/10.1109/ICPR.2016.7899608,Fusion of classifier predictions for audio-visual emotion recognition,2016 +6,IMDB,imdb_wiki,44.812384,20.453501,Singidunum University,edu,b7845e0b0ce17cde7db37d5524ef2a61dee3e540,citation,https://doi.org/10.1109/ICPR.2016.7899608,Fusion of classifier predictions for audio-visual emotion recognition,2016 +7,IMDB,imdb_wiki,58.38131405,26.72078081,University of Tartu,edu,b7845e0b0ce17cde7db37d5524ef2a61dee3e540,citation,https://doi.org/10.1109/ICPR.2016.7899608,Fusion of classifier predictions for audio-visual emotion recognition,2016 +8,IMDB,imdb_wiki,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +9,IMDB,imdb_wiki,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +10,IMDB,imdb_wiki,55.91029135,-3.32345777,Heriot-Watt University,edu,2cdc40f20b70ca44d9fd8e7716080ee05ca7924a,citation,http://pdfs.semanticscholar.org/2cdc/40f20b70ca44d9fd8e7716080ee05ca7924a.pdf,Real-time Convolutional Neural Networks for Emotion and Gender Classification,2017 +11,IMDB,imdb_wiki,56.45796755,-2.98214831,University of 
Dundee,edu,d5b0e73b584be507198b6665bcddeba92b62e1e5,citation,http://pdfs.semanticscholar.org/d5b0/e73b584be507198b6665bcddeba92b62e1e5.pdf,Multi-Region Ensemble Convolutional Neural Networks for High-Accuracy Age Estimation,2017 +12,IMDB,imdb_wiki,22.15263985,113.56803206,Macau University of Science and Technology,edu,d5b0e73b584be507198b6665bcddeba92b62e1e5,citation,http://pdfs.semanticscholar.org/d5b0/e73b584be507198b6665bcddeba92b62e1e5.pdf,Multi-Region Ensemble Convolutional Neural Networks for High-Accuracy Age Estimation,2017 +13,IMDB,imdb_wiki,24.12084345,120.67571165,National Chung Hsing University,edu,6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885,Smart Facial Age Estimation with Stacked Deep Network Fusion,2018 +14,IMDB,imdb_wiki,24.15031065,120.68325501,National Taichung University of Science and Technology,edu,6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885,Smart Facial Age Estimation with Stacked Deep Network Fusion,2018 +15,IMDB,imdb_wiki,41.10427915,29.02231159,Istanbul Technical University,edu,fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,citation,https://arxiv.org/pdf/1606.02909.pdf,Apparent Age Estimation Using Ensemble of Deep Learning Models,2016 +16,IMDB,imdb_wiki,42.357757,-83.06286711,Wayne State University,edu,4f1249369127cc2e2894f6b2f1052d399794919a,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663,Deep Age Estimation: From Classification to Ranking,2018 +17,IMDB,imdb_wiki,45.47567215,9.23336232,Università degli Studi di Milano,edu,a713a01971e73d0c3118d0409dc7699a24f521d6,citation,https://doi.org/10.1109/SSCI.2017.8285381,Age estimation based on face images and pre-trained convolutional neural networks,2017 +18,IMDB,imdb_wiki,35.6894875,139.6917064,"IBJ, Inc., Tokyo, Japan",company,df7af280771a6c8302b75ed0a14ffe7854cca679,citation,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026293,Prediction of users' facial attractiveness on an online dating website,2017 +19,IMDB,imdb_wiki,35.9020448,139.93622009,University of Tokyo,edu,df7af280771a6c8302b75ed0a14ffe7854cca679,citation,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026293,Prediction of users' facial attractiveness on an online dating website,2017 +20,IMDB,imdb_wiki,35.9990522,-78.9290629,Duke University,edu,cca9ae621e8228cfa787ec7954bb375536160e0d,citation,https://arxiv.org/pdf/1805.07410.pdf,Learning to Collaborate for User-Controlled Privacy,2018 +21,IMDB,imdb_wiki,51.5231607,-0.1282037,University College London,edu,cca9ae621e8228cfa787ec7954bb375536160e0d,citation,https://arxiv.org/pdf/1805.07410.pdf,Learning to Collaborate for User-Controlled Privacy,2018 +22,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,3ca5d3b8f5f071148cb50f22955fd8c1c1992719,citation,http://pdfs.semanticscholar.org/3ca5/d3b8f5f071148cb50f22955fd8c1c1992719.pdf,Evaluating race and sex diversity in the world's largest companies using deep neural networks,2017 +23,IMDB,imdb_wiki,53.57227,9.99472,"Dermalog Identification Systems, Hamburg, Germany",company,5b64584d6b01e66dfd0b6025b2552db1447ccdeb,citation,https://doi.org/10.1109/BTAS.2017.8272697,Deep expectation for estimation of fingerprint orientation fields,2017 +24,IMDB,imdb_wiki,60.7897318,10.6821927,"Norwegian Biometrics Lab, NTNU, Norway",edu,5b64584d6b01e66dfd0b6025b2552db1447ccdeb,citation,https://doi.org/10.1109/BTAS.2017.8272697,Deep expectation for estimation of fingerprint orientation fields,2017 
+25,IMDB,imdb_wiki,51.49887085,-0.17560797,Imperial College London,edu,56e079f4eb40744728fd1d7665938b06426338e5,citation,https://arxiv.org/pdf/1705.04293.pdf,Bayesian Approaches to Distribution Regression,2018 +26,IMDB,imdb_wiki,51.5231607,-0.1282037,University College London,edu,56e079f4eb40744728fd1d7665938b06426338e5,citation,https://arxiv.org/pdf/1705.04293.pdf,Bayesian Approaches to Distribution Regression,2018 +27,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,56e079f4eb40744728fd1d7665938b06426338e5,citation,https://arxiv.org/pdf/1705.04293.pdf,Bayesian Approaches to Distribution Regression,2018 +28,IMDB,imdb_wiki,45.5039761,-73.5749687,McGill University,edu,407bb798ab153bf6156ba2956f8cf93256b6910a,citation,http://pdfs.semanticscholar.org/407b/b798ab153bf6156ba2956f8cf93256b6910a.pdf,Fisher Pruning of Deep Nets for Facial Trait Classification,2018 +29,IMDB,imdb_wiki,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,39c10888a470b92b917788c57a6fd154c97b421c,citation,https://doi.org/10.1109/VCIP.2017.8305036,Joint multi-feature fusion and attribute relationships for facial attribute prediction,2017 +30,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,eb027969f9310e0ae941e2adee2d42cdf07d938c,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +31,IMDB,imdb_wiki,45.5039761,-73.5749687,McGill University,edu,ed9d11e995baeec17c5d2847ec1a8d5449254525,citation,https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf,Efficient Gender Classification Using a Deep LDA-Pruned Net,2017 +32,IMDB,imdb_wiki,31.32235655,121.38400941,Shanghai University,edu,d454ad60b061c1a1450810a0f335fafbfeceeccc,citation,https://arxiv.org/pdf/1712.07195.pdf,Deep Regression Forests for Age Estimation,2017 +33,IMDB,imdb_wiki,40.0044795,116.370238,Chinese Academy of Sciences,edu,288964068cd87d97a98b8bc927d6e0d2349458a2,citation,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,Mean-Variance Loss for Deep Age Estimation from a Face,0 +34,IMDB,imdb_wiki,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,288964068cd87d97a98b8bc927d6e0d2349458a2,citation,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,Mean-Variance Loss for Deep Age Estimation from a Face,0 +35,IMDB,imdb_wiki,40.51865195,-74.44099801,State University of New Jersey,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +36,IMDB,imdb_wiki,39.2899685,-76.62196103,University of Maryland,edu,d00e9a6339e34c613053d3b2c132fccbde547b56,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,A cascaded convolutional neural network for age estimation of unconstrained faces,2016 +37,IMDB,imdb_wiki,37.2830003,127.04548469,Ajou University,edu,c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,Age and gender estimation using deep residual learning network,2018 +38,IMDB,imdb_wiki,37.403917,127.159786,Korea Electronics Technology Institute,edu,c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,Age and gender estimation using deep residual learning network,2018 +39,IMDB,imdb_wiki,37.26728,126.9841151,Seoul National 
University,edu,c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,Age and gender estimation using deep residual learning network,2018 +40,IMDB,imdb_wiki,1.2962018,103.77689944,National University of Singapore,edu,5f94969b9491db552ffebc5911a45def99026afe,citation,https://pdfs.semanticscholar.org/5f94/969b9491db552ffebc5911a45def99026afe.pdf,Multimodal Learning and Reasoning for Visual Question Answering,2017 +41,IMDB,imdb_wiki,42.357757,-83.06286711,Wayne State University,edu,28d99dc2d673d62118658f8375b414e5192eac6f,citation,http://www.cs.wayne.edu/~mdong/cvpr17.pdf,Using Ranking-CNN for Age Estimation,2017 +42,IMDB,imdb_wiki,49.2767454,-122.91777375,Simon Fraser University,edu,975978ee6a32383d6f4f026b944099e7739e5890,citation,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf,Privacy-Preserving Age Estimation for Content Rating,2018 +43,IMDB,imdb_wiki,49.8091536,-97.13304179,University of Manitoba,edu,975978ee6a32383d6f4f026b944099e7739e5890,citation,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf,Privacy-Preserving Age Estimation for Content Rating,2018 +44,IMDB,imdb_wiki,43.66333345,-79.39769975,University of Toronto,edu,36a3a96ef54000a0cd63de867a5eb7e84396de09,citation,http://www.cs.toronto.edu/~guerzhoy/oriviz/crv17.pdf,Automatic Photo Orientation Detection with Convolutional Neural Networks,2017 +45,IMDB,imdb_wiki,31.32235655,121.38400941,Shanghai University,edu,5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b,citation,https://pdfs.semanticscholar.org/5f0d/4a0b5f72d8700cdf8cb179263a8fa866b59b.pdf,Memo No . 85 06 / 2018 Deep Regression Forests for Age Estimation,2018 +46,IMDB,imdb_wiki,51.5247272,-0.03931035,Queen Mary University of London,edu,6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365,citation,http://pdfs.semanticscholar.org/6cef/b70f4668ee6c0bf0c18ea36fd49dd60e8365.pdf,Privacy-Preserving Deep Inference for Rich User Data on The Cloud,2017 +47,IMDB,imdb_wiki,35.7036227,51.35125097,Sharif University of Technology,edu,6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365,citation,http://pdfs.semanticscholar.org/6cef/b70f4668ee6c0bf0c18ea36fd49dd60e8365.pdf,Privacy-Preserving Deep Inference for Rich User Data on The Cloud,2017 +48,IMDB,imdb_wiki,51.99882735,4.37396037,Delft University of Technology,edu,dfbf941adeea19f5dff4a70a466ddd1b77f3b727,citation,https://pdfs.semanticscholar.org/dfbf/941adeea19f5dff4a70a466ddd1b77f3b727.pdf,Models for supervised learning in sequence data,2018 +49,IMDB,imdb_wiki,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +50,IMDB,imdb_wiki,37.2520226,127.0555019,"Samsung SAIT, Korea",company,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +51,IMDB,imdb_wiki,35.9042272,-78.85565763,"IBM Research, North Carolina",company,00a967cb2d18e1394226ad37930524a31351f6cf,citation,https://arxiv.org/pdf/1611.05377v1.pdf,Fully-Adaptive Feature Sharing in Multi-Task Networks with Applications in Person Attribute Classification,2017 +52,IMDB,imdb_wiki,12.9803537,77.6975101,"Samsung R&D Institute, Bangalore, 
India",company,cf736f596bf881ca97ec4b29776baaa493b9d50e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629,Low Dimensional Deep Features for facial landmark alignment,2017 +53,IMDB,imdb_wiki,-35.0636071,147.3552234,Charles Sturt University,edu,2e231f1e7e641dd3619bec59e14d02e91360ac01,citation,https://arxiv.org/pdf/1807.10421.pdf,Fusion Network for Face-Based Age Estimation,2018 +54,IMDB,imdb_wiki,51.3791442,-2.3252332,University of Bath,edu,2e231f1e7e641dd3619bec59e14d02e91360ac01,citation,https://arxiv.org/pdf/1807.10421.pdf,Fusion Network for Face-Based Age Estimation,2018 +55,IMDB,imdb_wiki,1.340216,103.965089,Singapore University of Technology and Design,edu,00823e6c0b6f1cf22897b8d0b2596743723ec51c,citation,https://arxiv.org/pdf/1708.07689.pdf,Understanding and Comparing Deep Neural Networks for Age and Gender Classification,2017 +56,IMDB,imdb_wiki,31.2284923,121.40211389,East China Normal University,edu,5364e58ba1f4cdfcffb247c2421e8f56a75fad8d,citation,https://doi.org/10.1109/VCIP.2017.8305113,Facial age estimation through self-paced learning,2017 +57,IMDB,imdb_wiki,61.44964205,23.85877462,Tampere University of Technology,edu,7f21a7441c6ded38008c1fd0b91bdd54425d3f80,citation,https://arxiv.org/pdf/1809.05474.pdf,Real Time System for Facial Analysis,2018 +58,IMDB,imdb_wiki,55.94951105,-3.19534913,University of Edinburgh,edu,f5fae7810a33ed67852ad6a3e0144cb278b24b41,citation,https://pdfs.semanticscholar.org/f5fa/e7810a33ed67852ad6a3e0144cb278b24b41.pdf,Multilingual Gender Classification with Multi-view Deep Learning: Notebook for PAN at CLEF 2018,2018 +59,IMDB,imdb_wiki,40.9153196,-73.1270626,Stony Brook University,edu,1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc,citation,https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf,Squared Earth Mover ’ s Distance Loss for Training Deep Neural Networks on Ordered-Classes,2017 +60,IMDB,imdb_wiki,26.88111275,112.62850666,Hunan University,edu,86d0127e1fd04c3d8ea78401c838af621647dc95,citation,https://arxiv.org/pdf/1804.02810.pdf,A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction,2018 +61,IMDB,imdb_wiki,28.2290209,112.99483204,"National University of Defense Technology, China",edu,86d0127e1fd04c3d8ea78401c838af621647dc95,citation,https://arxiv.org/pdf/1804.02810.pdf,A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction,2018 +62,IMDB,imdb_wiki,29.58333105,-98.61944505,University of Texas at San Antonio,edu,86d0127e1fd04c3d8ea78401c838af621647dc95,citation,https://arxiv.org/pdf/1804.02810.pdf,A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction,2018 +63,IMDB,imdb_wiki,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d80a3d1f3a438e02a6685e66ee908446766fefa9,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +64,IMDB,imdb_wiki,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +65,IMDB,imdb_wiki,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 
+66,IMDB,imdb_wiki,40.00229045,116.32098908,Tsinghua University,edu,493c8591d6a1bef5d7b84164a73761cefb9f5a25,citation,http://dl.acm.org/citation.cfm?id=3159691,User Profiling through Deep Multimodal Fusion,2018 +67,IMDB,imdb_wiki,47.6543238,-122.30800894,University of Washington,edu,493c8591d6a1bef5d7b84164a73761cefb9f5a25,citation,http://dl.acm.org/citation.cfm?id=3159691,User Profiling through Deep Multimodal Fusion,2018 +68,IMDB,imdb_wiki,30.44235995,-84.29747867,Florida State University,edu,b8c08c1330779283b3fbf06d133faf8bd55ea941,citation,https://arxiv.org/pdf/1803.11521.pdf,Online Regression with Feature Selection in Stochastic Data Streams,2018 +69,IMDB,imdb_wiki,30.44235995,-84.29747867,Florida State University,edu,1cfca6b71b0ead87bbb79a8614ddec3a10100faa,citation,https://arxiv.org/pdf/1809.05465.pdf,Are screening methods useful in feature selection? An empirical study,2018 +70,IMDB,imdb_wiki,51.49887085,-0.17560797,Imperial College London,edu,a06b6d30e2b31dc600f622ab15afe5e2929581a7,citation,https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf,Robust Joint and Individual Variance Explained,2017 +71,IMDB,imdb_wiki,51.59029705,-0.22963221,Middlesex University,edu,a06b6d30e2b31dc600f622ab15afe5e2929581a7,citation,https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf,Robust Joint and Individual Variance Explained,2017 +72,IMDB,imdb_wiki,32.0575279,118.78682252,Southeast University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +73,IMDB,imdb_wiki,32.0565957,118.77408833,Nanjing University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +74,IMDB,imdb_wiki,58.38131405,26.72078081,University of Tartu,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +75,IMDB,imdb_wiki,41.3868913,2.16352385,University of Barcelona,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +76,IMDB,imdb_wiki,35.9542493,-83.9307395,University of Tennessee,edu,7fab17ef7e25626643f1d55257a3e13348e435bd,citation,https://arxiv.org/pdf/1702.08423.pdf,Age Progression/Regression by Conditional Adversarial Autoencoder,2017 +77,IMDB,imdb_wiki,37.4102193,-122.05965487,Carnegie Mellon University,edu,ec05078be14a11157ac0e1c6b430ac886124589b,citation,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,2018 +78,IMDB,imdb_wiki,45.57022705,-122.63709346,Concordia University,edu,ec05078be14a11157ac0e1c6b430ac886124589b,citation,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,2018 +79,IMDB,imdb_wiki,40.00229045,116.32098908,Tsinghua University,edu,2149d49c84a83848d6051867290d9c8bfcef0edb,citation,https://doi.org/10.1109/TIFS.2017.2746062,Label-Sensitive Deep Metric Learning for Facial Age Estimation,2018 
+80,IMDB,imdb_wiki,42.36782045,-71.12666653,Harvard University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +81,IMDB,imdb_wiki,40.9153196,-73.1270626,Stony Brook University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +82,IMDB,imdb_wiki,46.0658836,11.1159894,University of Trento,edu,df31e9c882dfb3ea5a3abe3b139ceacb1d90a302,citation,https://arxiv.org/pdf/1808.09211.pdf,DeepGUM: Learning Deep Robust Regression with a Gaussian-Uniform Mixture Model,2018 +83,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,523854a7d8755e944bd50217c14481fe1329a969,citation,https://arxiv.org/pdf/1808.00380.pdf,A Differentially Private Kernel Two-Sample Test,2018 +84,IMDB,imdb_wiki,51.49887085,-0.17560797,Imperial College London,edu,9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,citation,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf,Visual Data Augmentation through Learning,2018 +85,IMDB,imdb_wiki,51.59029705,-0.22963221,Middlesex University,edu,9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,citation,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf,Visual Data Augmentation through Learning,2018 +86,IMDB,imdb_wiki,40.00229045,116.32098908,Tsinghua University,edu,51f626540860ad75b68206025a45466a6d087aa6,citation,https://doi.org/10.1109/ICIP.2017.8296595,Cluster convolutional neural networks for facial age estimation,2017 +87,IMDB,imdb_wiki,49.2593879,-122.9151893,"AltumView Systems Inc., Burnaby, BC, Canada",company,b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,citation,https://doi.org/10.1109/GlobalSIP.2017.8309138,Lapped convolutional neural networks for embedded systems,2017 +88,IMDB,imdb_wiki,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +89,IMDB,imdb_wiki,22.15263985,113.56803206,Macau University of Science and Technology,edu,56f231fc40424ed9a7c93cbc9f5a99d022e1d242,citation,http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf,Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling,2016 +90,IMDB,imdb_wiki,40.0044795,116.370238,Chinese Academy of Sciences,edu,56f231fc40424ed9a7c93cbc9f5a99d022e1d242,citation,http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf,Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling,2016 +91,IMDB,imdb_wiki,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,56f231fc40424ed9a7c93cbc9f5a99d022e1d242,citation,http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf,Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling,2016 +92,IMDB,imdb_wiki,43.614386,7.071125,EURECOM,edu,1648cf24c042122af2f429641ba9599a2187d605,citation,https://doi.org/10.1109/BTAS.2017.8272698,Boosting cross-age face verification via generative age normalization,2017 +93,IMDB,imdb_wiki,21.003952,105.84360183,Hanoi University of Science and Technology,edu,ca37933b6297cdca211aa7250cbe6b59f8be40e5,citation,http://doi.acm.org/10.1145/3155133.3155207,"Multi-task learning for smile detection, emotion recognition and gender 
classification",2017 +94,IMDB,imdb_wiki,51.49887085,-0.17560797,Imperial College London,edu,cf2002fac81ccdccdadb5cc43f7b1cd30882d2c2,citation,https://arxiv.org/pdf/1803.09546.pdf,Calibrated Prediction Intervals for Neural Network Regressors,2018 +95,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,75f9d3533f175943e33c9155f4038488f32a24bc,citation,https://arxiv.org/pdf/1811.06817.pdf,Evaluating Uncertainty Quantification in End-to-End Autonomous Driving Control,2018 +96,IMDB,imdb_wiki,32.8536333,-117.2035286,Kyung Hee University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +97,IMDB,imdb_wiki,24.7246403,46.62335012,King Saud University,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +98,IMDB,imdb_wiki,23.7289899,90.3982682,Institute of Information Technology,edu,854b1f0581f5d3340f15eb79452363cbf38c04c8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,2017 +99,IMDB,imdb_wiki,28.5456282,77.2731505,"IIIT Delhi, India",edu,f726738954e7055bb3615fa7e8f59f136d3e0bdc,citation,https://arxiv.org/pdf/1803.07385.pdf,Are you eligible? Predicting adulthood from face images via class specific mean autoencoder,2018 +100,IMDB,imdb_wiki,42.0551164,-87.67581113,Northwestern University,edu,c1586ee25e660f31cba0ca9ba5bf39ffcc020aab,citation,https://arxiv.org/pdf/1807.06708.pdf,A Modulation Module for Multi-task Learning with Applications in Image Retrieval,2018 +101,IMDB,imdb_wiki,37.4102193,-122.05965487,Carnegie Mellon University,edu,c1586ee25e660f31cba0ca9ba5bf39ffcc020aab,citation,https://arxiv.org/pdf/1807.06708.pdf,A Modulation Module for Multi-task Learning with Applications in Image Retrieval,2018 +102,IMDB,imdb_wiki,30.04287695,31.23664139,American University in Cairo,edu,3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2,citation,https://arxiv.org/pdf/1710.03804.pdf,End-to-End Deep Learning for Steering Autonomous Vehicles Considering Temporal Dependencies,2017 +103,IMDB,imdb_wiki,31.83907195,117.26420748,University of Science and Technology of China,edu,47cd161546c59ab1e05f8841b82e985f72e5ddcb,citation,https://doi.org/10.1109/ICIP.2017.8296552,Gender classification in live videos,2017 +104,IMDB,imdb_wiki,39.2899685,-76.62196103,University of Maryland,edu,1491d0938bb4183bd19f2fee3b61997e1918160d,citation,https://arxiv.org/pdf/1807.00453.pdf,Elastic Neural Networks: A Scalable Framework for Embedded Computer Vision,2018 +105,IMDB,imdb_wiki,30.44235995,-84.29747867,Florida State University,edu,b88bace97d214d279e3a2053ccff0b6425295708,citation,https://arxiv.org/pdf/1803.11521.pdf,A Novel Framework for Online Supervised Learning with Feature Selection,2018 +106,IMDB,imdb_wiki,61.44964205,23.85877462,Tampere University of Technology,edu,b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e,citation,http://pdfs.semanticscholar.org/b20c/fbb2348984b4e25b6b9174f3c7b65b6aed9e.pdf,Learning with Ambiguous Label Distribution for Apparent Age Estimation,2016 +107,IMDB,imdb_wiki,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,f3ec43a7b22f6e5414fec473acda8ffd843e7baf,citation,https://arxiv.org/pdf/1809.07447.pdf,A Coupled Evolutionary Network for Age 
Estimation,2018 +108,IMDB,imdb_wiki,39.94976005,116.33629046,Beijing Jiaotong University,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +109,IMDB,imdb_wiki,43.1576969,-77.58829158,University of Rochester,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +110,IMDB,imdb_wiki,1.2962018,103.77689944,National University of Singapore,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +111,IMDB,imdb_wiki,31.846918,117.29053367,Hefei University of Technology,edu,dc9c0527f8d4461b1742cccc7317fec8dd96d81a,citation,https://arxiv.org/pdf/1805.08373.pdf,Speeding-Up Age Estimation in Intelligent Demographics System via Network Optimization,2018 +112,IMDB,imdb_wiki,1.3484104,103.68297965,Nanyang Technological University,edu,dc9c0527f8d4461b1742cccc7317fec8dd96d81a,citation,https://arxiv.org/pdf/1805.08373.pdf,Speeding-Up Age Estimation in Intelligent Demographics System via Network Optimization,2018 +113,IMDB,imdb_wiki,43.614386,7.071125,EURECOM,edu,f7b422df567ce9813926461251517761e3e6cda0,citation,https://arxiv.org/pdf/1702.01983.pdf,Face aging with conditional generative adversarial networks,2017 +114,IMDB,imdb_wiki,21.003952,105.84360183,Hanoi University of Science and Technology,edu,68573e296f069071d071fc158e974e8bc70c893f,citation,https://pdfs.semanticscholar.org/6857/3e296f069071d071fc158e974e8bc70c893f.pdf,"Effective Deep Multi-source Multi-task Learning Frameworks for Smile Detection, Emotion Recognition and Gender Classification",2018 +115,IMDB,imdb_wiki,46.0658836,11.1159894,University of Trento,edu,cb43519894258b125624dc0df655ab5357b1e42f,citation,https://arxiv.org/pdf/1802.00237.pdf,Face Aging with Contextual Generative Adversarial Nets,2017 +116,IMDB,imdb_wiki,1.2962018,103.77689944,National University of Singapore,edu,cb43519894258b125624dc0df655ab5357b1e42f,citation,https://arxiv.org/pdf/1802.00237.pdf,Face Aging with Contextual Generative Adversarial Nets,2017 +117,IMDB,imdb_wiki,43.614386,7.071125,EURECOM,edu,70569810e46f476515fce80a602a210f8d9a2b95,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.105,Apparent Age Estimation from Face Images Combining General and Children-Specialized Deep Learning Models,2016 +118,IMDB,imdb_wiki,43.614386,7.071125,EURECOM,edu,f519723238701849f1160d5a9cedebd31017da89,citation,http://pdfs.semanticscholar.org/f519/723238701849f1160d5a9cedebd31017da89.pdf,Impact of multi-focused images on recognition of soft biometric traits,2016 +119,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,4eab317b5ac436a949849ed286baa3de2a541eef,citation,https://arxiv.org/pdf/1809.02169.pdf,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,2018 +120,IMDB,imdb_wiki,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +121,IMDB,imdb_wiki,39.2899685,-76.62196103,University of 
Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +122,IMDB,imdb_wiki,51.7534538,-1.25400997,University of Oxford,edu,70c59dc3470ae867016f6ab0e008ac8ba03774a1,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +123,IMDB,imdb_wiki,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,3dfb822e16328e0f98a47209d7ecd242e4211f82,citation,https://arxiv.org/pdf/1708.08197.pdf,Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments,2017 +124,IMDB,imdb_wiki,30.44235995,-84.29747867,Florida State University,edu,bc99ff149c3c75b90f0110b8e72a9ae1300e29e7,citation,https://arxiv.org/pdf/1804.02744.pdf,Unsupervised Learning of Mixture Models with a Uniform Background Component,2018 +125,IMDB,imdb_wiki,60.18558755,24.8242733,Aalto University,edu,08d41d2f68a2bf0091dc373573ca379de9b16385,citation,https://arxiv.org/pdf/1802.05023.pdf,Recursive Chaining of Reversible Image-to-image Translators For Face Aging,2018 +126,IMDB,imdb_wiki,25.0410728,121.6147562,Institute of Information Science,edu,0951f42abbf649bb564a21d4ff5dddf9a5ea54d9,citation,https://arxiv.org/pdf/1806.02023.pdf,Joint Estimation of Age and Gender from Unconstrained Face Images Using Lightweight Multi-Task CNN for Mobile Applications,2018 +127,IMDB,imdb_wiki,53.21967825,6.56251482,University of Groningen,edu,8efda5708bbcf658d4f567e3866e3549fe045bbb,citation,http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf,Pre-trained Deep Convolutional Neural Networks for Face Recognition,2018 +128,IMDB,imdb_wiki,22.5447154,113.9357164,Tencent,company,7a7fddb3020e0c2dd4e3fe275329eb10f1cfbb8a,citation,https://arxiv.org/pdf/1810.07599.pdf,Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition,2018 diff --git a/site/datasets/final/lfw.json b/site/datasets/final/lfw.json index 33a12d09..f15d9156 100644 --- a/site/datasets/final/lfw.json +++ b/site/datasets/final/lfw.json @@ -1 +1 @@ -{"id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "paper": {"paper_id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "key": "lfw", "title": "Labeled Faces in the Wild: A Survey", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf", "address": "", "name": "LFW"}, "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "additional_papers": [{"paper_id": "370b5757a5379b15e30d619e4d3fb9e8e13f3256", "key": "lfw", "title": "Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf", "address": "", "name": "LFW"}, {"paper_id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "key": "lfw", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf", "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "name": "LFW"}], "citations": [{"id": "e94dfdc5581f6bc0338e21ad555b5f1734f8697e", "title": "Learning to Anonymize Faces for Privacy Preserving Action Detection", "addresses": [{"address": "University of California, Davis", "lat": "38.53363490", "lng": 
"-121.79077264", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11556.pdf"}, {"id": "aa892fe17c06e2b18db2b12314499a741e755df7", "title": "Improved performance of face recognition using CNN with constrained triplet loss layer", "addresses": [{"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/IJCNN.2017.7966089"}, {"id": "672fae3da801b2a0d2bad65afdbbbf1b2320623e", "title": "Pose-Selective Max Pooling for Measuring Similarity", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.07042.pdf"}, {"id": "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "title": "Towards Interpretable Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00611.pdf"}, {"id": "a90e6751ae32cb2983891ef2216293311cd6a8e9", "title": "Clustering using Ensemble Clustering Technique", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a90e/6751ae32cb2983891ef2216293311cd6a8e9.pdf"}, {"id": "9be696618cfcea90879747a8512f21b10cceac48", "title": "Structural Consistency and Controllability for Diverse Colorization", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.02129.pdf"}, {"id": "d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1", "title": "Face detection and recognition for home service robots with end-to-end deep neural networks", "addresses": [{"address": "Futurewei Technologies Inc., Santa Clara, CA", "lat": "37.37344400", "lng": "-121.96487270", "type": "company"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952553"}, {"id": "d4448f8aa320f04066cc43201d55ddd023eb712e", "title": "Clothing Change Aware Person Identification", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of South Carolina", "lat": "33.99282980", "lng": "-81.02685168", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/d444/8f8aa320f04066cc43201d55ddd023eb712e.pdf"}, {"id": "e1256ff535bf4c024dd62faeb2418d48674ddfa2", "title": "Towards Open-Set Identity Preserving Face Synthesis", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11182.pdf"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "10e0e6f1ec00b20bc78a5453a00c792f1334b016", "title": "Temporal Selective Max Pooling Towards Practical Face Recognition", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}], "year": 2016, "pdf": 
"http://pdfs.semanticscholar.org/672f/ae3da801b2a0d2bad65afdbbbf1b2320623e.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "0c1d85a197a1f5b7376652a485523e616a406273", "title": "Joint Registration and Representation Learning for Unconstrained Face Identification", "addresses": [{"address": "Khalifa University", "lat": "24.44690250", "lng": "54.39425630", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169"}, {"id": "585260468d023ffc95f0e539c3fa87254c28510b", "title": "Cardea: Context-Aware Visual Privacy Protection from Pervasive Cameras", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5852/60468d023ffc95f0e539c3fa87254c28510b.pdf"}, {"id": "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "title": "Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions", "addresses": [{"address": "University of Copenhagen", "lat": "55.68015020", "lng": "12.57232700", "type": "edu"}, {"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf"}, {"id": "db84c6fd771a073023f2b42e48a68eb2d9d31e4a", "title": "A Deep Variational Autoencoder Approach for Robust Facial Symmetrization", "addresses": [{"address": "Shandong University of Science and Technology", "lat": "36.00146435", "lng": "120.11624057", "type": "edu"}, {"address": "Ocean University of China", "lat": "36.16161795", "lng": "120.49355276", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/db84/c6fd771a073023f2b42e48a68eb2d9d31e4a.pdf"}, {"id": "486840f4f524e97f692a7f6b42cd19019ee71533", "title": "DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills", "addresses": [{"address": "\u00c9cole Centrale de Lyon", "lat": "45.78359660", "lng": "4.76789480", "type": "edu"}, {"address": "Safran Identity and Security", "lat": "48.83249300", "lng": "2.26747400", "type": "company"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1703.08388v2.pdf"}, {"id": "511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7", "title": "A Community Detection Approach to Cleaning Extremely Large Face Database", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf"}, {"id": "50b58becaf67e92a6d9633e0eea7d352157377c3", "title": "Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/50b5/8becaf67e92a6d9633e0eea7d352157377c3.pdf"}, {"id": "5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b", 
"title": "Regularized metric adaptation for unconstrained face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900278"}, {"id": "86204fc037936754813b91898377e8831396551a", "title": "Dense Face Alignment", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01442.pdf"}, {"id": "24b5ea4e262e22768813e7b6581f60e4ab9a8de7", "title": "Facial Soft Biometrics for Recognition in the Wild: Recent Works, Annotation, and COTS Evaluation", "addresses": [{"address": "Universidad Autonoma de Madrid", "lat": "40.48256135", "lng": "-3.69060790", "type": "edu"}, {"address": "Nokia Bell-Labs, Madrid, Spain", "lat": "40.39059140", "lng": "-74.18638510", "type": "company"}, {"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2018.2807791"}, {"id": "def2983576001bac7d6461d78451159800938112", "title": "The Do\u2019s and Don\u2019ts for CNN-Based Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07426.pdf"}, {"id": "35700f9a635bd3c128ab41718b040a0c28d6361a", "title": "DeepGait: A Learning Deep Convolutional Representation for View-Invariant Gait Recognition Using Joint Bayesian", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}, {"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3570/0f9a635bd3c128ab41718b040a0c28d6361a.pdf"}, {"id": "1648cf24c042122af2f429641ba9599a2187d605", "title": "Boosting cross-age face verification via generative age normalization", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272698"}, {"id": "856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b", "title": "Image-to-Set Face Recognition Using Locality Repulsion Projections and Sparse Reconstruction-Based Similarity Measure", "addresses": [{"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014"}, {"id": "30180f66d5b4b7c0367e4b43e2b55367b72d6d2a", "title": "Template Adaptation for Face Verification and Identification", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2017, "pdf": "http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf"}, {"id": "3107316f243233d45e3c7e5972517d1ed4991f91", "title": "CVAE-GAN: Fine-Grained Image Generation through Asymmetric Training", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1703.10155"}, {"id": "10f66f6550d74b817a3fdcef7fdeba13ccdba51c", "title": "Benchmarking Face Alignment", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", 
"type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/10f6/6f6550d74b817a3fdcef7fdeba13ccdba51c.pdf"}, {"id": "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf"}, {"id": "44b827df6c433ca49bcf44f9f3ebfdc0774ee952", "title": "Deep Correlation Feature Learning for Face Verification in the Wild", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2726105"}, {"id": "a0b1990dd2b4cd87e4fd60912cc1552c34792770", "title": "Deep Constrained Local Models for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf"}, {"id": "3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0", "title": "Defeating Image Obfuscation with Deep Learning", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/73cc/fdedbd7d72a147925727ba1932f9488cfde3.pdf"}, {"id": "d3a3d15a32644beffaac4322b9f165ed51cfd99b", "title": "Eye detection by using deep learning", "addresses": [{"address": "Gebze Technical University, Turkey", "lat": "40.80805620", "lng": "29.35612020", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/SIU.2016.7496197"}, {"id": "b13bf657ca6d34d0df90e7ae739c94a7efc30dc3", "title": "Attribute and Simile Classifiers for Face Verification (In submission please do not distribute.)", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b13b/f657ca6d34d0df90e7ae739c94a7efc30dc3.pdf"}, {"id": "93af36da08bf99e68c9b0d36e141ed8154455ac2", "title": "A Dditive M Argin S Oftmax for F Ace V Erification", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf"}, {"id": "280bc9751593897091015aaf2cab39805768b463", "title": "Gender Perception From Faces Using Boosted LBPH (Local Binary Patten Histograms)", "addresses": [{"address": "COMSATS Institute of Information Technology, Lahore", "lat": "31.40063320", "lng": "74.21372960", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf"}, {"id": "e57ce6244ec696ff9aa42d6af7f09eed176153a8", "title": "Instantaneous real-time head pose at a distance", "addresses": [{"address": "Heriot-Watt University", "lat": "55.91029135", "lng": "-3.32345777", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351449"}, {"id": "bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5", "title": "Avatar recommendation method based on facial attributes", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": 
"edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/VSMM.2014.7136653"}, {"id": "9821669a989a3df9d598c1b4332d17ae8e35e294", "title": "Minimal Correlation Classification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9821/669a989a3df9d598c1b4332d17ae8e35e294.pdf"}, {"id": "04661729f0ff6afe4b4d6223f18d0da1d479accf", "title": "From Facial Parts Responses to Face Detection: A Deep Learning Approach", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.419"}, {"id": "1d5aad4f7fae6d414ffb212cec1f7ac876de48bf", "title": "Face retriever: Pre-filtering the gallery via deep neural net", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139112"}, {"id": "03babadaaa7e71d4b65203e27e8957db649155c6", "title": "Distance Metric Learning via Iterated Support Vector Machines", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Xi\u2019an Jiaotong University", "lat": "34.25080300", "lng": "108.98369300", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Educational Testing Service", "lat": "40.34946320", "lng": "-74.71481500", "type": "company"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2725578"}, {"id": "20eabf10e9591443de95b726d90cda8efa7e53bb", "title": "Discriminative Histogram Intersection Metric Learning and Its Applications", "addresses": [{"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}, {"address": "Waseda University", "lat": "33.88987280", "lng": "130.70856205", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/s11390-017-1740-0"}, {"id": "0cdb49142f742f5edb293eb9261f8243aee36e12", "title": "Combined Learning of Salient Local Descriptors and Distance Metrics for Image Set Face Verification", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2012, "pdf": "http://arxiv.org/abs/1303.2783"}, {"id": "24b637c98b22cd932f74acfeecdb50533abea9ae", "title": "Robust Face Recognition via Minimum Error Entropy-Based Atomic Representation", "addresses": [{"address": "Hubei University", "lat": "30.48176100", "lng": "114.31096000", "type": "edu"}, {"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TIP.2015.2492819"}, {"id": "9d66de2a59ec20ca00a618481498a5320ad38481", "title": "POP: Privacy-Preserving Outsourced Photo Sharing and Searching for Mobile Devices", "addresses": [{"address": "Illinois Institute of Technology", "lat": "41.83619630", "lng": "-87.62655913", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://www.cs.iit.edu/~xli/paper/Conf/POP-ICDCS15.pdf"}, {"id": 
"2a98b850139b911df5a336d6ebf33be7819ae122", "title": "Maximum entropy regularized group collaborative representation for face recognition", "addresses": [{"address": "Georgia Southern University", "lat": "32.42143805", "lng": "-81.78450529", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7350806"}, {"id": "14b016c7a87d142f4b9a0e6dc470dcfc073af517", "title": "Modest proposals for improving biometric recognition papers", "addresses": [{"address": "San Jose State University", "lat": "37.33519080", "lng": "-121.88126008", "type": "edu"}], "year": 2015, "pdf": "http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912"}, {"id": "0aeb5020003e0c89219031b51bd30ff1bceea363", "title": "Sparsifying Neural Network Connections for Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525"}, {"id": "3ff79cf6df1937949cc9bc522041a9a39d314d83", "title": "Adversarial examples: A survey", "addresses": [{"address": "Warsaw University of Technology", "lat": "52.22165395", "lng": "21.00735776", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8406730"}, {"id": "676f9eabf4cfc1fd625228c83ff72f6499c67926", "title": "Face Identification and Clustering", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/676f/9eabf4cfc1fd625228c83ff72f6499c67926.pdf"}, {"id": "9ed4ad41cbad645e7109e146ef6df73f774cd75d", "title": "RPM: Random Points Matching for Pair wise Face-Similarity", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}, {"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/a83e/175ad5b2066e207f5d2ec830ae05bac266b9.pdf"}, {"id": "33ad23377eaead8955ed1c2b087a5e536fecf44e", "title": "Augmenting CRFs with Boltzmann Machine Shape Priors for Image Labeling", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": 2013, "pdf": "http://vis-www.cs.umass.edu/papers/gloc_cvpr13.pdf"}, {"id": "e8aa1f207b4b0bb710f79ab47a671d5639696a56", "title": "Exploiting symmetry in two-dimensional clustering-based discriminant analysis for face recognition", "addresses": [{"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7362364"}, {"id": "adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6", "title": "Two Birds, One Stone: Jointly Learning Binary Code for Large-Scale Face Image Retrieval and Attributes Prediction", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2015, "pdf": 
"http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.435"}, {"id": "fe961cbe4be0a35becd2d722f9f364ec3c26bd34", "title": "Computer-based Tracking, Analysis, and Visualization of Linguistically Significant Nonmanual Events in American Sign Language (ASL)", "addresses": [{"address": "Boston University", "lat": "42.35042530", "lng": "-71.10056114", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/fe96/1cbe4be0a35becd2d722f9f364ec3c26bd34.pdf"}, {"id": "b98e7a8f605c21e25ac5e32bfb1851a01f30081b", "title": "Deep nonlinear metric learning with independent subspace analysis for face verification", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://doi.acm.org/10.1145/2393347.2396303"}, {"id": "3d89f9b4da3d6fb1fdb33dea7592b5992069a096", "title": "Face recognition based on convolution siamese networks", "addresses": [{"address": "University of the Chinese Academy of Sciences", "lat": "30.57281500", "lng": "104.06680100", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/CISP-BMEI.2017.8302003"}, {"id": "e51927b125640bfc47bbf1aa00c3c026748c75bd", "title": "Automatic Facial Image Annotation and Retrieval by Integrating Voice Label and Visual Appearance", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2647868.2655015"}, {"id": "4e8c608fc4b8198f13f8a68b9c1a0780f6f50105", "title": "How Related Exemplars Help Complex Event Detection in Web Videos?", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Yang_How_Related_Exemplars_2013_ICCV_paper.pdf"}, {"id": "ce8db0fe11e7c96d08de561506f9f8f399dabbb2", "title": "Weighted sparse representation using a learned distance metric for face recognition", "addresses": [{"address": "Korea University", "lat": "37.59014110", "lng": "127.03623180", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351677"}, {"id": "2836d68c86f29bb87537ea6066d508fde838ad71", "title": "Personalized Age Progression with Aging Dictionary", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/pdf/1510.06503v1.pdf"}, {"id": "3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96", "title": "Person Identity Label Propagation in Stereo Videos", "addresses": [{"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TMM.2014.2315595"}, {"id": "dac34b590adddef2fc31f26e2aeb0059115d07a1", "title": "House in the (Biometric) Cloud: A Possible Application", "addresses": [{"address": "Sapienza University of Rome", "lat": "41.90376260", "lng": "12.51443840", "type": "edu"}, {"address": "Sapienza Univertsity of Rome", "lat": "41.90376260", "lng": "12.51443840", "type": "edu"}], "year": "2018", 
"pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436078"}, {"id": "210b98394c3be96e7fd75d3eb11a391da1b3a6ca", "title": "Spatiotemporal Derivative Pattern: A Dynamic Texture Descriptor for Video Matching", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}, {"address": "Tafresh University", "lat": "34.68092465", "lng": "50.05341352", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/210b/98394c3be96e7fd75d3eb11a391da1b3a6ca.pdf"}, {"id": "125d82fee1b9fbcc616622b0977f3d06771fc152", "title": "Hierarchical face parsing via deep learning", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2012, "pdf": "http://www.ee.cuhk.edu.hk/~xgwang/papers/luoWTcvpr12.pdf"}, {"id": "ccfebdf7917cb50b5fcd56fb837f841a2246a149", "title": "A feature subtraction method for image based kinship verification under uncontrolled environments", "addresses": [{"address": "Aalborg University", "lat": "57.01590275", "lng": "9.97532827", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351065"}, {"id": "a538b05ebb01a40323997629e171c91aa28b8e2f", "title": "Rectified Linear Units Improve Restricted Boltzmann Machines", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/a538/b05ebb01a40323997629e171c91aa28b8e2f.pdf"}, {"id": "4cdb6144d56098b819076a8572a664a2c2d27f72", "title": "Face Synthesis for Eyeglass-Robust Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.01196.pdf"}, {"id": "472ba8dd4ec72b34e85e733bccebb115811fd726", "title": "Cosine Similarity Metric Learning for Face Verification", "addresses": [{"address": "University of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf"}, {"id": "6880013eb0b91a2b334e0be0dced0a1a79943469", "title": "Discrimination-aware Channel Pruning for Deep Neural Networks", "addresses": [{"address": "South China University of Technology", "lat": "23.05020420", "lng": "113.39880323", "type": "edu"}, {"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11809.pdf"}, {"id": "86afb1e38a96f2ac00e792ef353a971fd13c8474", "title": "How interesting images are: An atypicality approach for social networks", "addresses": [{"address": "University of Hawaii", "lat": "21.29827950", "lng": "-157.81869230", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/BigData.2016.7840742"}, {"id": "0b242d5123f79defd5f775d49d8a7047ad3153bc", "title": "How Important Is Weight Symmetry in Backpropagation?", "addresses": [{"address": "McGovern Institute for Brain Research", "lat": "42.36262950", "lng": "-71.09144810", "type": "edu"}], "year": 2016, "pdf": 
"http://pdfs.semanticscholar.org/84db/c0010ae4f5206d689cf9f5bb176d18990bcd.pdf"}, {"id": "e293a31260cf20996d12d14b8f29a9d4d99c4642", "title": "LR-GAN: Layered Recursive Generative Adversarial Networks for Image Generation", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.01560.pdf"}, {"id": "6f9824c5cb5ac08760b08e374031cbdabc953bae", "title": "Unconstrained human identification using comparative facial soft biometrics", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/BTAS.2016.7791206"}, {"id": "21258aa3c48437a2831191b71cd069c05fb84cf7", "title": "A Robust and Efficient Doubly Regularized Metric Learning Approach", "addresses": [{"address": "University of Florida", "lat": "29.63287840", "lng": "-82.34901330", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2125/8aa3c48437a2831191b71cd069c05fb84cf7.pdf"}, {"id": "78d645d5b426247e9c8f359694080186681f57db", "title": "Gender Classification by LUT Based Boosting of Overlapping Block Patterns", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}, {"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/78d6/45d5b426247e9c8f359694080186681f57db.pdf"}, {"id": "2a35d20b2c0a045ea84723f328321c18be6f555c", "title": "Boost Picking: A Universal Method on Converting Supervised Classification to Semi-supervised Classification", "addresses": [{"address": "Beijing Institute of Technology", "lat": "39.95866520", "lng": "116.30971281", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d1be/cba3c460892453939f9f3639d8beddf2a133.pdf"}, {"id": "5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7", "title": "Rendering or normalization? An analysis of the 3D-aided pose-invariant face recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ISBA.2016.7477244"}, {"id": "9be653e1bc15ef487d7f93aad02f3c9552f3ee4a", "title": "Computer Vision for Head Pose Estimation: Review of a Competition", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/9be6/53e1bc15ef487d7f93aad02f3c9552f3ee4a.pdf"}, {"id": "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "title": "The MegaFace Benchmark: 1 Million Faces for Recognition at Scale", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527"}, {"id": "3d0c21d4780489bd624a74b07e28c16175df6355", "title": "Deep or Shallow Facial Descriptors? 
A Case for Facial Attribute Classification and Face Retrieval", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3d0c/21d4780489bd624a74b07e28c16175df6355.pdf"}, {"id": "48906f609446afcdaacbe1d65770d7a6165a8eee", "title": "Storages Are Not Forever", "addresses": [{"address": "RWTH Aachen University", "lat": "50.77917030", "lng": "6.06728733", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/s12559-017-9482-4"}, {"id": "31f905d40a4ac3c16c91d5be8427762fa91277f1", "title": "Learning Rotation-Invariant Local Binary Descriptor", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2704661"}, {"id": "0f64e26d6dd6f1c99fe2050887fac26cafe9ed60", "title": "Bridging the Gap Between Forensics and Biometric-Enabled Watchlists for e-Borders", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/MCI.2016.2627668"}, {"id": "2969f822b118637af29d8a3a0811ede2751897b5", "title": "Cascaded Shape Space Pruning for Robust Facial Landmark Detection", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://iip.ict.ac.cn/sites/default/files/publication/2013_ICCV_xwzhao_Cascaded%20Shape%20Space%20Pruning%20for%20Robust%20Facial%20Landmark%20Detection.pdf"}, {"id": "4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1", "title": "A Deep Sum-Product Architecture for Robust Facial Attributes Analysis", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Luo_A_Deep_Sum-Product_2013_ICCV_paper.pdf"}, {"id": "134f1cee8408cca648d8b4ca44b38b0a7023af71", "title": "Partially Shared MultiTask Convolutional Neural Network with Local Constraint for Face Attribute Learning", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/134f/1cee8408cca648d8b4ca44b38b0a7023af71.pdf"}, {"id": "d77f18917a58e7d4598d31af4e7be2762d858370", "title": "Detecting person presence in TV shows with linguistic and structural features", "addresses": [{"address": "Aix Marseille University, France", "lat": "43.29362100", "lng": "5.35806600", "type": "edu"}, {"address": "Orange Labs, Lannion, France", "lat": "48.75416800", "lng": "-3.45845860", "type": "company"}], "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6289062"}, {"id": "bd8f3fef958ebed5576792078f84c43999b1b207", "title": "BUAA-iCC at ImageCLEF 2015 Scalable Concept Image Annotation Challenge", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bd8f/3fef958ebed5576792078f84c43999b1b207.pdf"}, {"id": "c51fbd2574e488e486483e39702a3d7754cc769b", 
"title": "Face Recognition from Still Images to Video Sequences: A Local-Feature-Based Framework", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/c51f/bd2574e488e486483e39702a3d7754cc769b.pdf"}, {"id": "b13e2e43672e66ba45d1b852a34737e4ce04226b", "title": "Face Painting: querying art with photos", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3552/4e63c11f13fe08b2996a7bc0a9105e7c407b.pdf"}, {"id": "65b1760d9b1541241c6c0222cc4ee9df078b593a", "title": "Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf"}, {"id": "518a3ce2a290352afea22027b64bf3950bffc65a", "title": "Finding iconic images", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204174"}, {"id": "c87f7ee391d6000aef2eadb49f03fc237f4d1170", "title": "A real-time and unsupervised face Re-Identification system for Human-Robot Interaction", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1804.03547.pdf"}, {"id": "14b87359f6874ff9b8ee234b18b418e57e75b762", "title": "Face Alignment Using a Ranking Model based on Regression Trees", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}, {"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/1b62/6c14544f249cd52ef86a4efc17f3d3834003.pdf"}, {"id": "0e2d956790d3b8ab18cee8df6c949504ee78ad42", "title": "Scalable face image retrieval integrating multi-feature quantization and constrained reference re-ranking", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/IVCNZ.2013.6727024"}, {"id": "323f9ae6bdd2a4e4dce4168f7f7e19c70585c9b5", "title": "Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1712.01619.pdf"}, {"id": "1ce3a91214c94ed05f15343490981ec7cc810016", "title": "Exploring photobios", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2011, "pdf": "http://grail.cs.washington.edu/photobios/paper.pdf"}, {"id": "69eb6c91788e7c359ddd3500d01fb73433ce2e65", "title": "CAMGRAPH: Distributed Graph Processing for Camera Networks", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/69eb/6c91788e7c359ddd3500d01fb73433ce2e65.pdf"}, {"id": "2296d79753118cfcd0fecefece301557f4cb66e2", "title": "Exploring Disentangled Feature Representation Beyond Face Identification", "addresses": [{"address": "Chinese University of 
Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "SenseTime", "lat": "39.99300800", "lng": "116.32988200", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03487.pdf"}, {"id": "53ce84598052308b86ba79d873082853022aa7e9", "title": "Optimized Method for Real-Time Face Recognition System Based on PCA and Multiclass Support Vector Machine", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf"}, {"id": "a29a22878e1881d6cbf6acff2d0b209c8d3f778b", "title": "Benchmarking Still-to-Video Face Recognition via Partial and Local Linear Discriminant Analysis on COX-S2V Dataset", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a29a/22878e1881d6cbf6acff2d0b209c8d3f778b.pdf"}, {"id": "052f994898c79529955917f3dfc5181586282cf8", "title": "Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02191.pdf"}, {"id": "a0061dae94d916f60a5a5373088f665a1b54f673", "title": "Lensless computational imaging through deep learning", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a006/1dae94d916f60a5a5373088f665a1b54f673.pdf"}, {"id": "809ea255d144cff780300440d0f22c96e98abd53", "title": "ArcFace: Additive Angular Margin Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf"}, {"id": "439ca6ded75dffa5ddea203dde5e621dc4a88c3e", "title": "Robust real-time performance-driven 3D face tracking", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899906"}, {"id": "9c59bb28054eee783a40b467c82f38021c19ff3e", "title": "Logistic similarity metric learning for face verification", "addresses": [{"address": "University of Lyon", "lat": "45.78332440", "lng": "4.87819840", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7178311"}, {"id": "6043006467fb3fd1e9783928d8040ee1f1db1f3a", "title": "Face recognition with learning-based descriptor", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539992"}, {"id": "9729930ab0f9cbcd07f1105bc69c540330cda50a", "title": "Compressing Fisher Vector for Robust Face Recognition", "addresses": [{"address": "Beijing University of Posts and 
Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2749331"}, {"id": "3b9b200e76a35178da940279d566bbb7dfebb787", "title": "Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf"}, {"id": "feea73095b1be0cbae1ad7af8ba2c4fb6f316d35", "title": "Deep Face Recognition with Center Invariant Loss", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3126693"}, {"id": "54948ee407b5d32da4b2eee377cc44f20c3a7e0c", "title": "Right for the Right Reason: Training Agnostic Networks", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06296.pdf"}, {"id": "77869f274d4be4d4b4c438dbe7dff4baed521bd8", "title": "Face Recognition With Pose Variations and Misalignment via Orthogonal Procrustes Regression", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2551362"}, {"id": "0faee699eccb2da6cf4307ded67ba8434368257b", "title": "TAIGMAN: MULTIPLE ONE-SHOTS FOR UTILIZING CLASS LABEL INFORMATION 1 Multiple One-Shots for Utilizing Class Label Information", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}, {"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2396/5bd9b557b04b2c81a35ee5c16951c0e420f3.pdf"}, {"id": "1eb9c859ff7537182a25556635954bcd11830822", "title": "Multi-features fusion based CRFs for face segmentation", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Shanghai University", "lat": "31.32235655", "lng": "121.38400941", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICDSP.2015.7252004"}, {"id": "3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3", "title": "Distance Metric Learning with Eigenvalue Optimization", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}, {"address": "University of Exeter", "lat": "50.73693020", "lng": "-3.53647672", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/51f7/3cfcc6d671bd99b5c3c512ff9b7bb959f33b.pdf"}, {"id": "f5aee1529b98136194ef80961ba1a6de646645fe", "title": "Large-scale learning of discriminative image representations", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f5ae/e1529b98136194ef80961ba1a6de646645fe.pdf"}, {"id": "abdd17e411a7bfe043f280abd4e560a04ab6e992", "title": "Pose-Robust Face Recognition via Deep Residual Equivariant Mapping", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00839.pdf"}, {"id": "d40c16285d762f7a1c862b8ac05a0fdb24af1202", "title": "Coarse-to-fine facial landmarks localization based on 
convolutional feature", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BESC.2017.8256378"}, {"id": "780c8a795baca1ba4cb4956cded877dd3d1ca313", "title": "Simulation of face recognition at a distance by scaling down images", "addresses": [{"address": "University of Louisville", "lat": "38.21675650", "lng": "-85.75725023", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISSPIT.2013.6781879"}, {"id": "c222f8079c246ead285894c47bdbb2dfc7741044", "title": "Face de-identification with expressions preservation", "addresses": [{"address": "Bordeaux INP, France", "lat": "44.80557160", "lng": "-0.60519720", "type": "edu"}, {"address": "University of Bordeaux, France", "lat": "44.80837500", "lng": "-0.59670500", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351631"}, {"id": "2f2aa67c5d6dbfaf218c104184a8c807e8b29286", "title": "Video analytics for surveillance camera networks", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2013, "pdf": "http://sesame.comp.nus.edu.sg/components/com_flexicontent/uploads/lekhaicon13.pdf"}, {"id": "982d4f1dee188f662a4b5616a045d69fc5c21b54", "title": "Learning to link human objects in videos and advertisements with clothes retrieval", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IJCNN.2016.7727859"}, {"id": "81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5", "title": "Histogram equalized deep PCA with ELM classification for expressive face recognition", "addresses": [{"address": "Khon Kaen University", "lat": "16.46007565", "lng": "102.81211798", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369725"}, {"id": "86fa086d02f424705bbea53943390f009191740a", "title": "Precise eye localization with improved SDM", "addresses": [{"address": "Samsung SAIT, Korea", "lat": "37.25202260", "lng": "127.05550190", "type": "company"}, {"address": "Samsung SAIT, Beijing", "lat": "39.90419990", "lng": "116.40739630", "type": "company"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351651"}, {"id": "a35ed55dc330d470be2f610f4822f5152fcac4e1", "title": "Tattoo recognition technology - challenge (Tatt-C): an open tattoo database for developing tattoo recognition research", "addresses": [{"address": "NIST", "lat": "39.14004000", "lng": "-77.21850600", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ISBA.2015.7126369"}, {"id": "3ba74755c530347f14ec8261996dd9eae896e383", "title": "A Low-Power Convolutional Neural Network Face Recognition Processor and a CIS Integrated With Always-on Face Detector", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/JSSC.2017.2767705"}, {"id": "7fc3442c8b4c96300ad3e860ee0310edb086de94", "title": "Similarity Scores Based on Background Samples", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}, {"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2009, "pdf": 
"http://pdfs.semanticscholar.org/82f3/b7cacc15e026fd3a7639091d54162f6ae064.pdf"}, {"id": "42f6f5454dda99d8989f9814989efd50fe807ee8", "title": "Conditional generative adversarial nets for convolutional face generation", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/42f6/f5454dda99d8989f9814989efd50fe807ee8.pdf"}, {"id": "0aebe97a92f590bdf21cdadfddec8061c682cdb2", "title": "Probabilistic Elastic Part Model: A Pose-Invariant Representation for Real-World Face Verification", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183"}, {"id": "7ffef9f26c39377ee937d29b8990580266a7a8a5", "title": "Deep Metric Learning with Hierarchical Triplet Loss", "addresses": [{"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06951.pdf"}, {"id": "7862f646d640cbf9f88e5ba94a7d642e2a552ec9", "title": "Being John Malkovich", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/7862/f646d640cbf9f88e5ba94a7d642e2a552ec9.pdf"}, {"id": "754626bd5fb06fee5e10962fdfeddd495513e84b", "title": "Facial expression pair matching", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SIU.2017.7960646"}, {"id": "e3c8e49ffa7beceffca3f7f276c27ae6d29b35db", "title": "Families in the Wild (FIW): Large-Scale Kinship Image Database and Benchmarks", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1604.02182.pdf"}, {"id": "b306bd9b485c6a6c1e4550beb1910ed9b6585359", "title": "Learning generative models of mid-level structure in natural images", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/b306/bd9b485c6a6c1e4550beb1910ed9b6585359.pdf"}, {"id": "aed6af12148b43e4a24ee6e2bc3604ca59bd99a5", "title": "Discriminative Deep Metric Learning for Face and Kinship Verification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2717505"}, {"id": "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "title": "AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces", "addresses": [{"address": "York University", "lat": "43.77439110", "lng": "-79.50481085", "type": "edu"}, {"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04277.pdf"}, {"id": "467b602a67cfd7c347fe7ce74c02b38c4bb1f332", "title": "Large Margin Local Metric Learning", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/467b/602a67cfd7c347fe7ce74c02b38c4bb1f332.pdf"}, {"id": 
"9887ab220254859ffc7354d5189083a87c9bca6e", "title": "Generic Image Classification Approaches Excel on Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf"}, {"id": "0034e37a0faf0f71395245b266aacbf5412f190a", "title": "Face Distortion Recovery Based on Online Learning Database for Conversational Video", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TMM.2014.2355134"}, {"id": "fe0cf8eaa5a5f59225197ef1bb8613e603cd96d4", "title": "Improved Face Verification with Simple Weighted Feature Combination", "addresses": [{"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf"}, {"id": "e20e2db743e8db1ff61279f4fda32bf8cf381f8e", "title": "Deep Cross Polarimetric Thermal-to-Visible Face Recognition", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.01486.pdf"}, {"id": "9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534", "title": "Exponential Discriminant Locality Preserving Projection for face recognition", "addresses": [{"address": "Jiangsu University of Science and Technology", "lat": "32.19805500", "lng": "119.46326791", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2016.02.063"}, {"id": "7783095a565094ae5b3dccf082d504ddd7255a5c", "title": "\"Wow! 
you are so beautiful today!\"", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2013", "pdf": "http://dl.acm.org/citation.cfm?id=2502258"}, {"id": "fff31548617f208cd5ae5c32917afd48abc4ff6a", "title": "Mobile situated analytics of ego-centric network data", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3139295.3139309"}, {"id": "0de1450369cb57e77ef61cd334c3192226e2b4c2", "title": "In defense of low-level structural features and SVMs for facial attribute classification: Application to detection of eye state, Mouth State, and eyeglasses in the wild", "addresses": [{"address": "Virginia Polytechnic Institute and State University", "lat": "37.21872455", "lng": "-80.42542519", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272747"}, {"id": "15cf7bdc36ec901596c56d04c934596cf7b43115", "title": "Face Extraction from Image based on K-Means Clustering Algorithms", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/15cf/7bdc36ec901596c56d04c934596cf7b43115.pdf"}, {"id": "3a49507c46a2b8c6411809c81ac47b2b1d2282c3", "title": "Exploring joint encoding of multi-direction local binary patterns for image classification", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-5319-0"}, {"id": "b81cae2927598253da37954fb36a2549c5405cdb", "title": "Experiments on Visual Information Extraction with the Faces of Wikipedia", "addresses": [{"address": "Polytechnique Montreal", "lat": "45.50438400", "lng": "-73.61288290", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/d892/753827950a227179b691e6df85820ab7c417.pdf"}, {"id": "58d76380d194248b3bb291b8c7c5137a0a376897", "title": "FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf"}, {"id": "5dd57b7e0e82a33420c054da7ea3f435d49e910e", "title": "Matching and Perturbation Theories for Affine-Invariant Shapes Using QR Factorization with Column Pivoting", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1007/s10851-014-0493-4"}, {"id": "110919f803740912e02bb7e1424373d325f558a9", "title": "Statistical Inference of Gaussian-Laplace Distribution for Person Verification", "addresses": [{"address": "China University of Geosciences", "lat": "30.52715100", "lng": "114.40076200", "type": "edu"}, {"address": "National Institute of Informatics, Japan", "lat": "35.69248530", "lng": "139.75825330", "type": "edu"}, {"address": "Wuhan University of Technology", "lat": "30.60903415", "lng": "114.35142840", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3123266.3123421"}, {"id": 
"537328af75f50d49696972a6c34bca97c14bc762", "title": "Exploiting Unintended Feature Leakage in Collaborative Learning", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04049.pdf"}, {"id": "9825c4dddeb2ed7eaab668b55403aa2c38bc3320", "title": "Aerial Imagery for Roof Segmentation: A Large-Scale Dataset towards Automatic Mapping of Buildings", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}, {"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09532.pdf"}, {"id": "7a65fc9e78eff3ab6062707deaadde024d2fad40", "title": "A Study on Apparent Age Estimation", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf"}, {"id": "0750c796467b6ef60b0caff5fb199337d54d431e", "title": "Face detection method based on histogram of sparse code in tree deformable model", "addresses": [{"address": "Chongqing University of Posts and Telecommunications", "lat": "29.53570460", "lng": "106.60482474", "type": "edu"}, {"address": "University of North Carolina Wilmington", "lat": "34.23755810", "lng": "-77.92701290", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICMLC.2016.7873015"}, {"id": "e66a6ae542907d6a0ebc45da60a62d3eecf17839", "title": "3D-aided face recognition from videos", "addresses": [{"address": "Morpho, SAFRAN Group, France", "lat": "48.82250670", "lng": "2.26875410", "type": "company"}, {"address": "University of Lyon", "lat": "45.78332440", "lng": "4.87819840", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/EUVIP.2014.7018366"}, {"id": "ae425a2654a1064c2eda29b08a492c8d5aab27a2", "title": "An incremental face recognition system based on deep learning", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.23919/MVA.2017.7986845"}, {"id": "39c10888a470b92b917788c57a6fd154c97b421c", "title": "Joint multi-feature fusion and attribute relationships for facial attribute prediction", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/VCIP.2017.8305036"}, {"id": "d04d5692461d208dd5f079b98082eda887b62323", "title": "Subspace learning with frequency regularizer: Its application to face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/ZLEI-ICB-15.pdf"}, {"id": "e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a", "title": "Topology preserving graph matching for partial face recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019548"}, {"id": "84574aa43a98ad8a29470977e7b091f5a5ec2366", "title": "Latent max-margin metric learning for comparing video face tubes", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": 
"edu"}, {"address": "Technicolor, France", "lat": "48.83153300", "lng": "2.28066283", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301321"}, {"id": "9103148dd87e6ff9fba28509f3b265e1873166c9", "title": "Face Analysis using 3D Morphable Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf"}, {"id": "32c20afb5c91ed7cdbafb76408c3a62b38dd9160", "title": "Viewing Real-World Faces in 3D", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Hassner_Viewing_Real-World_Faces_2013_ICCV_paper.pdf"}, {"id": "f5eb0cf9c57716618fab8e24e841f9536057a28a", "title": "Rethinking Feature Distribution for Loss Functions in Image Classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.02988.pdf"}, {"id": "b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0", "title": "Homemade TS-Net for Automatic Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2911996.2911999"}, {"id": "24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9", "title": "An analysis of the robustness of deep face recognition networks to noisy training labels", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/GlobalSIP.2016.7906030"}, {"id": "4b9b30066a05bdeb0e05025402668499ebf99a6b", "title": "Real-time face detection using Gentle AdaBoost algorithm and nesting cascade structure", "addresses": [{"address": "Huaqiao University", "lat": "24.60047120", "lng": "118.08165740", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/ISPACS.2012.6473448"}, {"id": "23e824d1dfc33f3780dd18076284f07bd99f1c43", "title": "Spoofing faces using makeup: An investigative study", "addresses": [{"address": "INRIA M\u00e9diterran\u00e9e", "lat": "43.61581310", "lng": "7.06838000", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686"}, {"id": "612075999e82596f3b42a80e6996712cc52880a3", "title": "CNNs with cross-correlation matching for face recognition in video surveillance using a single training sample per person", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078554"}, {"id": "6bfb0f8dd1a2c0b44347f09006dc991b8a08559c", "title": "Multiview discriminative learning for age-invariant face recognition", "addresses": [{"address": "Lomonosov Moscow State University", "lat": "55.70229715", "lng": "37.53179777", "type": "edu"}, {"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", 
"type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2013, "pdf": "https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf"}, {"id": "c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1", "title": "Ultra-Resolving Face Images by Discriminative Generative Networks", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d6f1/42f5ddcb027e7b346eb20703abbf5cc4e883.pdf"}, {"id": "d8288322f32ee4501cef5a9b667e5bb79ebd7018", "title": "Facing scalability: Naming faces in an online social network", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1016/j.patcog.2011.12.018"}, {"id": "de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0", "title": "Merge or Not? Learning to Group Faces via Imitation Learning", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1707.03986.pdf"}, {"id": "eb87151fd2796ff5b4bbcf1906d41d53ac6c5595", "title": "Enhanced face detection using body part detections for wearable cameras", "addresses": [{"address": "IBM Thomas J. Watson Research Center", "lat": "41.21002475", "lng": "-73.80407056", "type": "company"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899719"}, {"id": "edb5813a32ce1167feb263ca2803d0ae934d902c", "title": "Invisible Steganography via Generative Adversarial Networks", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08571.pdf"}, {"id": "7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d", "title": "Stereo Matching for Unconstrained Face Recognition Ph . D . 
Proposal", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/7ca7/255c2e0c86e4adddbbff2ce74f36b1dc522d.pdf"}, {"id": "53bfe2ab770e74d064303f3bd2867e5bf7b86379", "title": "Learning to Synthesize and Manipulate Natural Images", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/d989/c3064d49bf8e63587ada4ed2bdb0d32b120a.pdf"}, {"id": "1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d", "title": "Online robust image alignment via iterative convex optimization", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Temple University", "lat": "39.95472495", "lng": "-75.15346905", "type": "edu"}], "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247878"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "27dafedccd7b049e87efed72cabaa32ec00fdd45", "title": "Unsupervised visual alignment with similarity graphs", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_074.pdf"}, {"id": "e9c008d31da38d9eef67a28d2c77cb7daec941fb", "title": "Noisy Softmax: Improving the Generalization Ability of DCNN via Postponing the Early Softmax Saturation", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.03769.pdf"}, {"id": "39f525f3a0475e6bbfbe781ae3a74aca5b401125", "title": "Deep Joint Face Hallucination and Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/39f5/25f3a0475e6bbfbe781ae3a74aca5b401125.pdf"}, {"id": "88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5", "title": "Runtime Neural Pruning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/88cd/4209db62a34d9cba0b9cbe9d45d1e57d21e5.pdf"}, {"id": "3b75681f0162752865d85befd8b15e7d954ebfe6", "title": "Evaluation of a face recognition system performance's variation on a citizen passports database", "addresses": [{"address": "Universidad de la Rep\u00fablica, Uruguay", "lat": "-34.91817060", "lng": "-56.16657250", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/CLEI.2014.6965097"}, {"id": "ed9d11e995baeec17c5d2847ec1a8d5449254525", "title": "Efficient Gender Classification Using a Deep LDA-Pruned Net", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", "type": "edu"}], "year": "2017", "pdf": 
"https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf"}, {"id": "21d1315761131ea6b3e2afe7a745b606341616fd", "title": "Generative Adversarial Network with Spatial Attention for Face Attribute Editing", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/21d1/315761131ea6b3e2afe7a745b606341616fd.pdf"}, {"id": "13a994d489c15d440c1238fc1ac37dad06dd928c", "title": "Learning Discriminant Face Descriptor for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/13a9/94d489c15d440c1238fc1ac37dad06dd928c.pdf"}, {"id": "96f0e7416994035c91f4e0dfa40fd45090debfc5", "title": "Unsupervised Learning of Face Representations", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.01260.pdf"}, {"id": "0141cb33c822e87e93b0c1bad0a09db49b3ad470", "title": "Unconstrained 3D face reconstruction", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876"}, {"id": "d2f2b10a8f29165d815e652f8d44955a12d057e6", "title": "Multiscale binarised statistical image features for symmetric face matching using multiple descriptor fusion based on class-specific LDA", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1007/s10044-015-0475-1"}, {"id": "582edc19f2b1ab2ac6883426f147196c8306685a", "title": "Do We Really Need to Collect Millions of Faces for Effective Face Recognition?", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf"}, {"id": "2d3c17ced03e4b6c4b014490fe3d40c62d02e914", "title": "Video-driven state-aware facial animation", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2d3c/17ced03e4b6c4b014490fe3d40c62d02e914.pdf"}, {"id": "cd55fb30737625e86454a2861302b96833ed549d", "title": "Annotating Unconstrained Face Imagery: A scalable approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "Noblis, Falls Church, VA, U.S.A.", "lat": "38.95187000", "lng": "-77.36325900", "type": "company"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094"}, {"id": "78f08685d44b6c6f82983d9b0f9c6ac2f7203a5e", "title": "An Adaptive Ensemble Approach to Ambient Intelligence Assisted People Search", "addresses": [{"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/78f0/8685d44b6c6f82983d9b0f9c6ac2f7203a5e.pdf"}, {"id": "c4d0d09115a0df856cdb389fbccb20f62b07b14e", "title": "Environment coupled metrics learning for unconstrained face verification", "addresses": [{"address": "Chinese Academy of 
Science", "lat": "39.90419990", "lng": "116.40739630", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/ICIP.2012.6466925"}, {"id": "955e2a39f51c0b6f967199942d77625009e580f9", "title": "Naming Faces on the Web", "addresses": [{"address": "Bilkent University", "lat": "39.87204890", "lng": "32.75395155", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/955e/2a39f51c0b6f967199942d77625009e580f9.pdf"}, {"id": "fdd19fee07f2404952e629cc7f7ffaac14febe01", "title": "Face recognition based on dictionary learning with the locality constraints of atoms", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Guangdong Polytechnic Normal University", "lat": "23.13170700", "lng": "113.37164300", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/CISP-BMEI.2016.7852754"}, {"id": "e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa", "title": "Weakly Supervised Learning for Unconstrained Face Processing", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf"}, {"id": "051f03bc25ec633592aa2ff5db1d416b705eac6c", "title": "Partial face recognition: An alignment free approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf"}, {"id": "f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b", "title": "Understanding Blooming Human Groups in Social Networks", "addresses": [{"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}, {"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TMM.2015.2476657"}, {"id": "a961f1234e963a7945fed70197015678149b37d8", "title": "Facial Expression Synthesis by U-Net Conditional Generative Adversarial Networks", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3206068"}, {"id": "660c99ac408b535bb0468ab3708d0d1d5db30180", "title": "An improved redundant dictionary based on sparse representation for face recognition", "addresses": [{"address": "China University of Mining and Technology", "lat": "34.21525380", "lng": "117.13985410", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1007/s11042-015-3083-6"}, {"id": "c444c4dab97dd6d6696f56c1cacda051dde60448", "title": "Multiview Face Detection and Registration Requiring Minimal Manual Intervention", "addresses": [{"address": "A*STAR, Singapore", "lat": "1.29889260", "lng": "103.78731070", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37"}, {"id": "1aeef2ab062c27e0dbba481047e818d4c471ca57", "title": "Analyzing impact of image scaling algorithms on viola-jones face detection framework", "addresses": [{"address": "Central Electronics Research Institute, Pilani, India", "lat": "28.36561930", "lng": "75.58349530", "type": "edu"}], "year": 2015, 
"pdf": "https://doi.org/10.1109/ICACCI.2015.7275860"}, {"id": "d6ae7941dcec920d5726d50d1b1cdfe4dde34d35", "title": "Avatar digitization from a single image for real-time rendering", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=31310887"}, {"id": "fba95853ca3135cc52a4b2bc67089041c2a9408c", "title": "Disguised Faces in the Wild", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fba9/5853ca3135cc52a4b2bc67089041c2a9408c.pdf"}, {"id": "4512b87d68458d9ba0956c0f74b60371b6c69df4", "title": "SuperPatchMatch: An Algorithm for Robust Correspondences Using Superpixel Patches", "addresses": [{"address": "University of Bordeaux", "lat": "44.80837500", "lng": "-0.59670500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2708504"}, {"id": "38a9ca2c49a77b540be52377784b9f734e0417e4", "title": "Face verification using large feature sets and one shot similarity", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Campinas", "lat": "-27.59539950", "lng": "-48.61542180", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2011, "pdf": "http://homepages.dcc.ufmg.br/~william/papers/paper_2011_IJCB_Faces.pdf"}, {"id": "ac2881bdf7b57dc1672a17b221d68a438d79fce8", "title": "Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08472.pdf"}, {"id": "69adbfa7b0b886caac15ebe53b89adce390598a3", "title": "Face hallucination using cascaded super-resolution and identity priors", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10938.pdf"}, {"id": "8686b15802529ff8aea50995ef14079681788110", "title": "Deformed Graph Laplacian for Semisupervised Learning", "addresses": [{"address": "University of Technology", "lat": "-33.88405040", "lng": "151.19922540", "type": "edu"}, {"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TNNLS.2014.2376936"}, {"id": "9f131b4e036208f2402182a1af2a59e3c5d7dd44", "title": "Face Retrieval Framework Relying on User's Visual Memory", "addresses": [{"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}, {"address": "Waseda University", "lat": "33.88987280", "lng": "130.70856205", "type": "edu"}], "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3206038"}, {"id": "09ce14b84af2dc2f76ae1cf227356fa0ba337d07", "title": "Face reconstruction in the wild", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2011, "pdf": "http://grail.cs.washington.edu/3dfaces/paper.pdf"}, {"id": 
"d79365336115661b0e8dbbcd4b2aa1f504b91af6", "title": "Variational methods for conditional multimodal deep learning", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1603.01801.pdf"}, {"id": "7df4f96138a4e23492ea96cf921794fc5287ba72", "title": "A Jointly Learned Deep Architecture for Facial Attribute Analysis and Face Detection in the Wild", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.08705.pdf"}, {"id": "6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "title": "Deep Learning Face Attributes in the Wild", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/pdf/1411.7766v2.pdf"}, {"id": "dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935", "title": "Age classification with deep learning face representation", "addresses": [{"address": "South China Normal University", "lat": "23.14319700", "lng": "113.34009651", "type": "edu"}, {"address": "South China University of Technology", "lat": "23.05020420", "lng": "113.39880323", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4646-5"}, {"id": "bbcb4920b312da201bf4d2359383fb4ee3b17ed9", "title": "Robust Face Recognition via Multi-Scale Patch-Based Matrix Regression", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/bbcb/4920b312da201bf4d2359383fb4ee3b17ed9.pdf"}, {"id": "feb6e267923868bff6e2108603d00fdfd65251ca", "title": "Unsupervised Discovery of Visual Face Categories", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "University of Nevada", "lat": "39.54694490", "lng": "-119.81346566", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/feb6/e267923868bff6e2108603d00fdfd65251ca.pdf"}, {"id": "8f6263e4d3775757e804796e104631c7a2bb8679", "title": "Characterizing Visual Representations within Convolutional Neural Networks: Toward a Quantitative Approach", "addresses": [{"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/8f62/63e4d3775757e804796e104631c7a2bb8679.pdf"}, {"id": "2ab034e1f54c37bfc8ae93f7320160748310dc73", "title": "Siamese Capsule Networks", "addresses": [{"address": "University of Liverpool", "lat": "53.40617900", "lng": "-2.96670819", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07242.pdf"}, {"id": "1d1a7ef193b958f9074f4f236060a5f5e7642fc1", "title": "Ensemble of Patterns of Oriented Edge Magnitudes Descriptors For Face Recognition", "addresses": [{"address": "University of Bologna", "lat": "44.49623180", "lng": "11.35415700", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/db40/804914afbb7f8279ca9a4f52e0ade695f19e.pdf"}, {"id": "36486944b4feeb88c0499fecd253c5a53034a23f", "title": "Deep feature selection and projection for cross-age face retrieval", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/CISP-BMEI.2017.8301986"}, 
{"id": "e7436b8e68bb7139b823a7572af3decd96241e78", "title": "A new approach for face detection with omnidirectional sensors", "addresses": [{"address": "IRSEEM Rouen, France", "lat": "49.44323200", "lng": "1.09997100", "type": "edu"}, {"address": "University of Rouen, France", "lat": "49.38497570", "lng": "1.06832570", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/ROBIO.2011.6181560"}, {"id": "96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9", "title": "State-of-the-art face recognition performance using publicly available software and datasets", "addresses": [{"address": "Universit\u00e9 Paris-Saclay, France", "lat": "48.84760370", "lng": "2.26399340", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450"}, {"id": "e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7", "title": "Toward End-to-End Face Recognition Through Alignment Learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1701.07174.pdf"}, {"id": "405526dfc79de98f5bf3c97bf4aa9a287700f15d", "title": "MegaFace: A Million Faces for Recognition at Scale", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf"}, {"id": "053b263b4a4ccc6f9097ad28ebf39c2957254dfb", "title": "Cost-Effective HITs for Relative Similarity Comparisons", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7a49/4b4489408ec3adea15817978ecd2e733f5fe.pdf"}, {"id": "82e66c4832386cafcec16b92ac88088ffd1a1bc9", "title": "OpenFace: A general-purpose face recognition library with mobile applications", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Poznan University of Technology", "lat": "52.40048370", "lng": "16.95158083", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf"}, {"id": "8bbbdff11e88327816cad3c565f4ab1bb3ee20db", "title": "Automatic Semantic Face Recognition", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.31"}, {"id": "2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d", "title": "Genealogical face recognition based on UB KinFace database", "addresses": [{"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/CVPRW.2011.5981801"}, {"id": "5f57a1a3a1e5364792b35e8f5f259f92ad561c1f", "title": "Implicit Sparse Code Hashing", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5f57/a1a3a1e5364792b35e8f5f259f92ad561c1f.pdf"}, {"id": "1677d29a108a1c0f27a6a630e74856e7bddcb70d", "title": "Efficient Misalignment-Robust Representation for Real-Time Face Recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2012, "pdf": 
"http://pdfs.semanticscholar.org/1677/d29a108a1c0f27a6a630e74856e7bddcb70d.pdf"}, {"id": "5f4219118556d2c627137827a617cf4e26242a6e", "title": "Explicit Shape Regression With Characteristic Number for Facial Landmark Localization", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TMM.2017.2751143"}, {"id": "84e6669b47670f9f4f49c0085311dce0e178b685", "title": "Face frontalization for Alignment and Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/84e6/669b47670f9f4f49c0085311dce0e178b685.pdf"}, {"id": "5213549200bccec57232fc3ff788ddf1043af7b3", "title": "Displaced dynamic expression regression for real-time facial tracking and animation", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2601097.2601204"}, {"id": "5039834df68600a24e7e8eefb6ba44a5124e67fc", "title": "Modular hierarchical feature learning with deep neural networks for face verification", "addresses": [{"address": "Beijing Institute of Science and Technology Information", "lat": "40.04332040", "lng": "116.34181090", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/ICIP.2013.6738761"}, {"id": "44f48a4b1ef94a9104d063e53bf88a69ff0f55f3", "title": "Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf"}, {"id": "6584c3c877400e1689a11ef70133daa86a238602", "title": "Supervised Committee of Convolutional Neural Networks in Automated Facial Expression Analysis", "addresses": [{"address": "Universitat Oberta de Catalunya", "lat": "41.40657415", "lng": "2.19453410", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8039231"}, {"id": "5763b09ebca9a756b4adebf74d6d7de27e80e298", "title": "Picture-specific cohort score normalization for face pair matching", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/BTAS.2013.6712738"}, {"id": "13bda03fc8984d5943ed8d02e49a779d27c84114", "title": "Efficient object detection using cascades of nearest convex model classifiers", "addresses": [{"address": "Eskisehir Osmangazi University", "lat": "39.74875160", "lng": "30.47653071", "type": "edu"}], "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248047"}, {"id": "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf", "title": "A Multi-level Contextual Model for Person Recognition in Photo Albums", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2016, "pdf": "http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological 
University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "e66b4aa85524f493dafde8c75176ac0afad5b79c", "title": "Watchlist risk assessment using multiparametric cost and relative entropy", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SSCI.2017.8285219"}, {"id": "8af411697e73f6cfe691fe502d4bfb42510b4835", "title": "Dynamic Local Ternary Pattern for Face Recognition and Verification", "addresses": [{"address": "Hankuk University of Foreign Studies", "lat": "37.59539790", "lng": "127.06304990", "type": "edu"}, {"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}, {"address": "University of Dhaka", "lat": "23.73169570", "lng": "90.39652750", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/8af4/11697e73f6cfe691fe502d4bfb42510b4835.pdf"}, {"id": "1d3dd9aba79a53390317ec1e0b7cd742cba43132", "title": "A maximum entropy feature descriptor for age invariant face recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf"}, {"id": "539f55c0e2501c1d86791c8b54b225d9b3187b9c", "title": "Low-Rank Latent Pattern Approximation With Applications to Robust Image Classification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2738560"}, {"id": "82ccd62f70e669ec770daf11d9611cab0a13047e", "title": "Sparse Variation Pattern for Texture Classification", "addresses": [{"address": "Azad University", "lat": "36.31734320", "lng": "50.03672860", "type": "edu"}, {"address": "Tafresh University", "lat": "34.68092465", "lng": "50.05341352", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": 2013, "pdf": "http://www.csse.uwa.edu.au/~ajmal/papers/Farshid_DICTA2013.pdf"}, {"id": "1b5875dbebc76fec87e72cee7a5263d325a77376", "title": "Learnt Quasi-Transitive Similarity for Retrieval from Large Collections of Faces", "addresses": [{"address": "University of St Andrews", "lat": "56.34119840", "lng": "-2.79309380", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.528"}, {"id": "5632ba72b2652df3b648b2ee698233e76a4eee65", "title": "Reconstruction of 3D facial image using a single 2D image", "addresses": [{"address": "Xiamen University", "lat": "24.43994190", "lng": "118.09301781", "type": "edu"}, {"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8346387"}, {"id": "7c42371bae54050dbbf7ded1e7a9b4109a23a482", "title": "Optimized features selection using hybrid PSO-GA for multi-view gender classification", "addresses": [{"address": "Foundation University Rawalpindi Campus", "lat": 
"33.56095040", "lng": "73.07125966", "type": "edu"}, {"address": "University of Central Punjab", "lat": "31.44661490", "lng": "74.26797620", "type": "edu"}, {"address": "University of Dammam", "lat": "26.39793625", "lng": "50.19807924", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7c42/371bae54050dbbf7ded1e7a9b4109a23a482.pdf"}, {"id": "d23ec100432d860b12308941f8539af82a28843f", "title": "Adversarial Semantic Scene Completion from a Single Depth Image", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.10901.pdf"}, {"id": "43fe03ec1acb6ea9d05d2b22eeddb2631bd30437", "title": "Weakly supervised multiscale-inception learning for web-scale face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296394"}, {"id": "93971a49ef6cc88a139420349a1dfd85fb5d3f5c", "title": "Scalable Probabilistic Models: Applied to Face Identification in the Wild", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/9397/1a49ef6cc88a139420349a1dfd85fb5d3f5c.pdf"}, {"id": "bf8a520533f401347e2f55da17383a3e567ef6d8", "title": "Bounded-Distortion Metric Learning", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bf8a/520533f401347e2f55da17383a3e567ef6d8.pdf"}, {"id": "e9e40e588f8e6510fa5537e0c9e083ceed5d07ad", "title": "Fast Face Detection Using Graphics Processor", "addresses": [{"address": "National Institute of Technology, Karnataka", "lat": "13.01119095", "lng": "74.79498825", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/e9e4/0e588f8e6510fa5537e0c9e083ceed5d07ad.pdf"}, {"id": "192235f5a9e4c9d6a28ec0d333e36f294b32f764", "title": "Reconfiguring the Imaging Pipeline for Computer Vision", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "e4e3faa47bb567491eaeaebb2213bf0e1db989e1", "title": "Empirical Risk Minimization for Metric Learning Using Privileged Information", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}, {"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e4e3/faa47bb567491eaeaebb2213bf0e1db989e1.pdf"}, {"id": "4b02387c2db968a70b69d98da3c443f139099e91", "title": "Detecting facial landmarks in the video based on a hybrid framework", 
"addresses": [{"address": "Guangdong University of Technology", "lat": "23.13538360", "lng": "113.29470496", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4b02/387c2db968a70b69d98da3c443f139099e91.pdf"}, {"id": "f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3", "title": "Large Margin Multi-metric Learning for Face and Kinship Verification in the Wild", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f7c5/0d2be9fba0e4527fd9fbe3095e9d9a94fdd3.pdf"}, {"id": "217de4ff802d4904d3f90d2e24a29371307942fe", "title": "POOF: Part-Based One-vs.-One Features for Fine-Grained Categorization, Face Verification, and Attribute Estimation", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.128"}, {"id": "e3b324101157daede3b4d16bdc9c2388e849c7d4", "title": "Robust Real-Time 3 D Face Tracking from RGBD Videos under Extreme Pose , Depth , and Expression Variations", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/e3b3/24101157daede3b4d16bdc9c2388e849c7d4.pdf"}, {"id": "8ee5b1c9fb0bded3578113c738060290403ed472", "title": "Extending explicit shape regression with mixed feature channels and pose priors", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2014, "pdf": "https://infoscience.epfl.ch/record/200452/files/wacv2014-RGE.pdf"}, {"id": "0c59071ddd33849bd431165bc2d21bbe165a81e0", "title": "Person Recognition in Personal Photo Collections", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf"}, {"id": "4223917177405eaa6bdedca061eb28f7b440ed8e", "title": "B-spline Shape from Motion & Shading: An Automatic Free-form Surface Modeling for Face Reconstruction", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4223/917177405eaa6bdedca061eb28f7b440ed8e.pdf"}, {"id": "90ad0daa279c3e30b360f9fe9371293d68f4cebf", "title": "Spatio-temporal Framework and Algorithms for Video-based Face Recognition", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/90ad/0daa279c3e30b360f9fe9371293d68f4cebf.pdf"}, {"id": "c675534be881e59a78a5986b8fb4e649ddd2abbe", "title": "Face recognition by landmark pooling-based CNN with concentrate loss", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296548"}, {"id": "561ae67de137e75e9642ab3512d3749b34484310", "title": "DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning", "addresses": [{"address": "Rheinische-Friedrich-Wilhelms University", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}, {"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}, {"address": "University of California, San Diego", "lat": 
"32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf"}, {"id": "39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc", "title": "Simultaneous Local Binary Feature Learning and Encoding for Face Recognition", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://openaccess.thecvf.com/content_iccv_2015/papers/Lu_Simultaneous_Local_Binary_ICCV_2015_paper.pdf"}, {"id": "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "title": "Ring loss: Convex Feature Normalization for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00130.pdf"}, {"id": "b1fdd4ae17d82612cefd4e78b690847b071379d3", "title": "Supervised Descent Method", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/4fc5/416b6c7173d3462e5be796bda3ad8d5645a1.pdf"}, {"id": "1921795408345751791b44b379f51b7dd54ebfa2", "title": "From Face Recognition to Models of Identity: A Bayesian Approach to Learning About Unknown Identities from Unsupervised Data", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07872.pdf"}, {"id": "f05ad40246656a977cf321c8299158435e3f3b61", "title": "Face Recognition Using Face Patch Networks", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Lu_Face_Recognition_Using_2013_ICCV_paper.pdf"}, {"id": "5134353bd01c4ea36bd007c460e8972b1541d0ad", "title": "Face Recognition with Multi-Resolution Spectral Feature Images", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "Anhui University", "lat": "31.76909325", "lng": "117.17795091", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/5134/353bd01c4ea36bd007c460e8972b1541d0ad.pdf"}, {"id": "29db046dd1f8100b279c3f5f5c5ef19bdbf5af9a", "title": "Recent Progress of Face Image Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04717.pdf"}, {"id": "940e5c45511b63f609568dce2ad61437c5e39683", "title": "Fiducial Facial Point Extraction Using a Novel Projective Invariant", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TIP.2015.2390976"}, {"id": "785eeac2e236a85a45b4e0356c0745279c31e089", "title": "Learning Person-Specific Representations From Faces in the Wild", "addresses": [{"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}, {"address": "State University of Campinas", "lat": "-22.81377650", "lng": "-47.06400040", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIFS.2014.2359543"}, {"id": "9901f473aeea177a55e58bac8fd4f1b086e575a4", "title": "Human and sheep 
facial landmarks localisation by triplet interpolated features", "addresses": [{"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1509.04954.pdf"}, {"id": "e1449be4951ba7519945cd1ad50656c3516113da", "title": "Local Gradient Hexa Pattern: A Descriptor for Face Recognition and Retrieval", "addresses": [{"address": "IIIT Allahabad, India", "lat": "25.42991140", "lng": "81.77118270", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TCSVT.2016.2603535"}, {"id": "f92ade569cbe54344ffd3bb25efd366dcd8ad659", "title": "Effect of Super Resolution on High Dimensional Features for Unsupervised Face Recognition in the Wild", "addresses": [{"address": "University of Bridgeport", "lat": "41.16648580", "lng": "-73.19205640", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.01464.pdf"}, {"id": "9989ad33b64accea8042e386ff3f1216386ba7f1", "title": "Facial feature extraction method based on shallow and deep fusion CNN", "addresses": [{"address": "Guilin University of Electronic Technology Guangxi Guilin", "lat": "25.28739920", "lng": "110.33242770", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320"}, {"id": "8efda5708bbcf658d4f567e3866e3549fe045bbb", "title": "Pre-trained Deep Convolutional Neural Networks for Face Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf"}, {"id": "3b092733f428b12f1f920638f868ed1e8663fe57", "title": "On the size of Convolutional Neural Networks and generalization performance", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf"}, {"id": "55966926e7c28b1eee1c7eb7a0b11b10605a1af0", "title": "Surpassing Human-Level Face Verification Performance on LFW with GaussianFace", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf"}, {"id": "e6d6203fa911429d76f026e2ec2de260ec520432", "title": "Siamese network features for image matching", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}, {"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899663"}, {"id": "4d3c4c3fe8742821242368e87cd72da0bd7d3783", "title": "Hybrid Deep Learning for Face Verification", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTiccv13.pdf"}, {"id": "9af9a88c60d9e4b53e759823c439fc590a4b5bc5", "title": "Learning Deep Convolutional Embeddings for Face Representation Using Joint Sample- and Set-Based Supervision", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.00277.pdf"}, {"id": 
"939123cf21dc9189a03671484c734091b240183e", "title": "Within- and cross- database evaluations for face gender classification via befit protocols", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2014, "pdf": "http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf"}, {"id": "061e29eae705f318eee703b9e17dc0989547ba0c", "title": "Enhancing Expression Recognition in the Wild with Unlabeled Reference Data", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/061e/29eae705f318eee703b9e17dc0989547ba0c.pdf"}, {"id": "435642641312364e45f4989fac0901b205c49d53", "title": "Face Model Compression by Distilling Knowledge from Neurons", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4356/42641312364e45f4989fac0901b205c49d53.pdf"}, {"id": "80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7", "title": "Learning Kernel Extended Dictionary for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TNNLS.2016.2522431"}, {"id": "edbddf8c176d6e914f0babe64ad56c051597d415", "title": "Predicting Image Memorability Through Adaptive Transfer Learning From External Sources", "addresses": [{"address": "Shandong University", "lat": "36.36934730", "lng": "120.67381800", "type": "edu"}, {"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TMM.2016.2644866"}, {"id": "18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae", "title": "Learning invariant representations and applications to face verification", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}, {"address": "McGovern Institute for Brain Research", "lat": "42.36262950", "lng": "-71.09144810", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/18c6/c92c39c8a5a2bb8b5673f339d3c26b8dcaae.pdf"}, {"id": "37c8514df89337f34421dc27b86d0eb45b660a5e", "title": "Facial Landmark Tracking by Tree-Based Deformable Part Model Based Detector", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Uricar_Facial_Landmark_Tracking_ICCV_2015_paper.pdf"}, {"id": "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "title": "Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource Constraints", "addresses": [{"address": "Indiana University", "lat": "39.86948105", "lng": "-84.87956905", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09900.pdf"}, {"id": "96e1ccfe96566e3c96d7b86e134fa698c01f2289", "title": "Semi-adversarial Networks: Convolutional Autoencoders for Imparting Privacy to Face Images", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": 
"https://arxiv.org/pdf/1712.00321.pdf"}, {"id": "d78fbd11f12cbc194e8ede761d292dc2c02d38a2", "title": "Enhancing Gray Scale Images for Face Detection under Unstable Lighting Condition", "addresses": [{"address": "University of Dschang", "lat": "5.44094480", "lng": "10.07120561", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d78f/bd11f12cbc194e8ede761d292dc2c02d38a2.pdf"}, {"id": "86d0127e1fd04c3d8ea78401c838af621647dc95", "title": "A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction", "addresses": [{"address": "Hunan University", "lat": "26.88111275", "lng": "112.62850666", "type": "edu"}, {"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.02810.pdf"}, {"id": "103a7c3eba36792886ae8005f6492332e6b05bad", "title": "Facial Recognition with Encoded Local Projections", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.06218.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "35d42f4e7a1d898bc8e2d052c38e1106f3e80188", "title": "Human and algorithm performance on the PaSC face Recognition Challenge", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358765"}, {"id": "06d7ef72fae1be206070b9119fb6b61ce4699587", "title": "On One-Shot Similarity Kernels: Explicit Feature Maps and Properties", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}, {"address": "University of Patras", "lat": "38.28994820", "lng": "21.78864690", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zafeiriou_On_One-Shot_Similarity_2013_ICCV_paper.pdf"}, {"id": "c17c7b201cfd0bcd75441afeaa734544c6ca3416", "title": "Layerwise Class-Aware Convolutional Neural Network", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TCSVT.2016.2587389"}, {"id": "e6da1fcd2a8cda0c69b3d94812caa7d844903007", "title": "Sonicdoor: scaling person identification with ultrasonic sensors by novel modeling of shape, behavior and walking patterns", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3137154"}, {"id": "cd74d606e76ecddee75279679d9770cdc0b49861", "title": "Transfer Learning of Structured Representation for Face Recognition", "addresses": [{"address": "Sun Yat-Sen 
University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2365725"}, {"id": "2e58ec57d71b2b2a3e71086234dd7037559cc17e", "title": "A Gender Recognition System from Facial Image", "addresses": [{"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}, {"address": "University of Dhaka", "lat": "23.73169570", "lng": "90.39652750", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf"}, {"id": "8f99f7ccb85af6d4b9e015a9b215c529126e7844", "title": "Face image-based age and gender estimation with consideration of ethnic difference", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ROMAN.2017.8172359"}, {"id": "4b605e6a9362485bfe69950432fa1f896e7d19bf", "title": "A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf"}, {"id": "2af2b74c3462ccff3a6881ff7cf4f321b3242fa9", "title": "Name-Face Association in Web Videos: A Large-Scale Dataset, Baselines, and Open Issues", "addresses": [{"address": "City University of Hong Kong", "lat": "22.34000115", "lng": "114.16970291", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2014, "pdf": "http://yugangjiang.info/publication/JCST-nameface.pdf"}, {"id": "9ff931ca721d50e470e1a38e583c7b18b6cdc2cc", "title": "An Overview and Empirical Comparison of Distance Metric Learning Methods", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407637"}, {"id": "578117ff493d691166fefc52fd61bad70d8752a9", "title": "Dealing with occlusions in face recognition by region-based fusion", "addresses": [{"address": "Universidad Autonoma de Madrid", "lat": "40.48256135", "lng": "-3.69060790", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/CCST.2016.7815707"}, {"id": "3cea3aba77649d718991d0cb30135887267c11e8", "title": "Adversarial Attack Type I: Generating False Positives", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.00594.pdf"}, {"id": "053931267af79a89791479b18d1b9cde3edcb415", "title": "Attributes for Improved Attributes: A Multi-Task Network Utilizing Implicit and Explicit Relationships for Facial Attribute Classification", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/0539/31267af79a89791479b18d1b9cde3edcb415.pdf"}, {"id": "02467703b6e087799e04e321bea3a4c354c5487d", "title": "Grouper: Optimizing Crowdsourced Face Annotations", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", 
"lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27"}, {"id": "afe9cfba90d4b1dbd7db1cf60faf91f24d12b286", "title": "Principal Directions of Synthetic Exact Filters for Robust Real-Time Eye Localization", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/afe9/cfba90d4b1dbd7db1cf60faf91f24d12b286.pdf"}, {"id": "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "title": "Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/SSCI.2015.37"}, {"id": "0ce3a786aed896d128f5efdf78733cc675970854", "title": "Learning the Face Prior for Bayesian Face Recognition", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3689/2b6bb4848a9c21158b8eded7f14a6654dd7e.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "9fc993aeb0a007ccfaca369a9a8c0ccf7697261d", "title": "Context-Aware Local Binary Feature Learning for Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534"}, {"id": "746c0205fdf191a737df7af000eaec9409ede73f", "title": "Investigating Nuisances in DCNN-Based Face Recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119"}, {"id": "3802c97f925cb03bac91d9db13d8b777dfd29dcc", "title": "Non-parametric Bayesian Constrained Local Models", "addresses": [{"address": "Institute of Systems and Robotics", "lat": "53.83383710", "lng": "10.70359390", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.232"}, {"id": "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "title": "Effective 3D based frontalization for unconstrained face recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774"}, {"id": "5957936195c10521dadc9b90ca9b159eb1fc4871", "title": "LBP-ferns-based feature extraction for robust facial recognition", "addresses": [{"address": "Korea University", "lat": "37.59014110", "lng": "127.03623180", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TCE.2016.7838098"}, {"id": "d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1", "title": "Constrained Metric Learning by Permutation Inducing Isometries", "addresses": [{"address": "Qatar University", "lat": "25.37461295", "lng": "51.48980354", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2015.2502144"}, {"id": 
"79db191ca1268dc88271abef3179c4fe4ee92aed", "title": "Facial Expression Based Automatic Album Creation", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/79db/191ca1268dc88271abef3179c4fe4ee92aed.pdf"}, {"id": "3983370efe7a7521bde255017171724d845b3383", "title": "Learning Discriminators as Energy Networks in Adversarial Learning", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}, {"address": "University of Iowa", "lat": "41.66590000", "lng": "-91.57310307", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.01152.pdf"}, {"id": "faf19885431cb39360158982c3a1127f6090a1f6", "title": "Inheritable Fisher vector feature for kinship verification", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358768"}, {"id": "c4f3185f010027a0a97fcb9753d74eb27a9cfd3e", "title": "Learning to classify gender from four million images", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1016/j.patrec.2015.02.006"}, {"id": "b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef", "title": "Approximate radial gradient transform based face recognition", "addresses": [{"address": "Mangalore University", "lat": "12.81608485", "lng": "74.92449278", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICACCI.2015.7275752"}, {"id": "5e0e516226413ea1e973f1a24e2fdedde98e7ec0", "title": "The Invariance Hypothesis and the Ventral Stream", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/74ce/97da57ec848db660ee69dec709f226c74f43.pdf"}, {"id": "d8896861126b7fd5d2ceb6fed8505a6dff83414f", "title": "In-plane Rotational Alignment of Faces by Eye and Eye-pair Detection", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf"}, {"id": "70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e", "title": "Elastic preserving projections based on L1-norm maximization", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-5608-2"}, {"id": "b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8", "title": "HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf"}, {"id": "148eb413bede35487198ce7851997bf8721ea2d6", "title": "People Search in Surveillance Videos", "addresses": [{"address": "IBM Research, North Carolina", "lat": "35.90422720", "lng": "-78.85565763", "type": "company"}], "year": 2009, "pdf": 
"http://pdfs.semanticscholar.org/148e/b413bede35487198ce7851997bf8721ea2d6.pdf"}, {"id": "e3e2c106ccbd668fb9fca851498c662add257036", "title": "Appearance, context and co-occurrence ensembles for identity recognition in personal photo collections", "addresses": [{"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2013, "pdf": "http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf"}, {"id": "d22b378fb4ef241d8d210202893518d08e0bb213", "title": "Random Faces Guided Sparse Many-to-One Encoder for Pose-Invariant Face Recognition", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Random_Faces_Guided_2013_ICCV_paper.pdf"}, {"id": "dbb9601a1d2febcce4c07dd2b819243d81abb2c2", "title": "Landmark Free Face Attribute Prediction", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "SAP Innovation Center Network, Singapore", "lat": "1.27486000", "lng": "103.79778700", "type": "company"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361884"}, {"id": "8633732d9f787f8497c2696309c7d70176995c15", "title": "Multi-objective convolutional learning for face labeling", "addresses": [{"address": "Baidu Research, USA", "lat": "37.40922650", "lng": "-122.02366150", "type": "company"}, {"address": "University of California, Merced", "lat": "37.36566745", "lng": "-120.42158888", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298967"}, {"id": "7fb5006b6522436ece5bedf509e79bdb7b79c9a7", "title": "Multi-Task Convolutional Neural Network for Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf"}, {"id": "368e99f669ea5fd395b3193cd75b301a76150f9d", "title": "One-to-many face recognition with bilinear CNNs", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1506.01342.pdf"}, {"id": "c18a03568d4b512a0d8380cbb1fbf6bd56d11f05", "title": "A Wearable IoT with Complex Artificial Perception Embedding for Alzheimer Patients", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8430403"}, {"id": "a6b5ffb5b406abfda2509cae66cdcf56b4bb3837", "title": "One Shot Similarity Metric Learning for Action Recognition", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}, {"address": "Open University", "lat": "52.02453775", "lng": "-0.70927481", "type": "edu"}, {"address": "Weizmann Institute of Science", "lat": "31.90784990", "lng": "34.81334092", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/bce2/02717ce134b317b39f0a18151659d643875b.pdf"}, {"id": "4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a", "title": "Video-Based Face Recognition Using the Intra/Extra-Personal Difference Dictionary", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": 
"edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf"}, {"id": "c07ab025d9e3c885ad5386e6f000543efe091c4b", "title": "Preserving Model Privacy for Machine Learning in Distributed Systems", "addresses": [{"address": "Binghamton University", "lat": "42.09580770", "lng": "-75.91455689", "type": "edu"}, {"address": "University of Florida", "lat": "29.63287840", "lng": "-82.34901330", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302601"}, {"id": "5798055e11e25c404b1b0027bc9331bcc6e00555", "title": "PDSS: patch-descriptor-similarity space for effective face verification", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": 2012, "pdf": "http://doi.acm.org/10.1145/2393347.2396357"}, {"id": "c43ed9b34cad1a3976bac7979808eb038d88af84", "title": "Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03675.pdf"}, {"id": "c98983592777952d1751103b4d397d3ace00852d", "title": "Face Synthesis from Facial Identity Features", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf"}, {"id": "48174c414cfce7f1d71c4401d2b3d49ba91c5338", "title": "Robust Performance-driven 3D Face Tracking in Long Range Depth Scenes", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4817/4c414cfce7f1d71c4401d2b3d49ba91c5338.pdf"}, {"id": "c2e03efd8c5217188ab685e73cc2e52c54835d1a", "title": "Deep tree-structured face: A unified representation for multi-task facial biometrics", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477585"}, {"id": "21959bc56a160ebd450606867dce1462a913afab", "title": "Face recognition based on manifold constrained joint sparse sensing with K-SVD", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}, {"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}, {"address": "Shanghai University", "lat": "31.32235655", "lng": "121.38400941", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-6071-9"}, {"id": "06518858bd99cddf9bc9200fac5311fc29ac33b4", "title": "Sparse Low-Rank Component-Based Representation for Face Recognition With Low-Quality Images", "addresses": [{"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}, {"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}], "year": 
"2019", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392777"}, {"id": "f1d6da83dcf71eda45a56a86c5ae13e7f45a8536", "title": "A Secure Face-Verification Scheme Based on Homomorphic Encryption and Deep Neural Networks", "addresses": [{"address": "Beijing University of Technology", "lat": "39.87391435", "lng": "116.47722285", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2737544"}, {"id": "337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958", "title": "Data-specific Adaptive Threshold for Face Recognition and Authentication", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11160.pdf"}, {"id": "4d16337cc0431cd43043dfef839ce5f0717c3483", "title": "A Scalable and Privacy-Aware IoT Service for Live Video Analytics", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4d16/337cc0431cd43043dfef839ce5f0717c3483.pdf"}, {"id": "1d696a1beb42515ab16f3a9f6f72584a41492a03", "title": "Deeply learned face representations are sparse, selective, and robust", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTcvpr15.pdf"}, {"id": "683ec608442617d11200cfbcd816e86ce9ec0899", "title": "Dual Linear Regression Based Classification for Face Cluster Recognition", "addresses": [{"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.342"}, {"id": "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "title": "Face , Age and Gender Recognition using Local Descriptors", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf"}, {"id": "25d514d26ecbc147becf4117512523412e1f060b", "title": "Annotated crowd video face database", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139083"}, {"id": "e40cb4369c6402ae53c81ce52b73df3ef89f578b", "title": "Facial image clustering in stereoscopic videos using double spectral analysis", "addresses": [{"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1016/j.image.2015.01.009"}, {"id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf"}, {"id": "4ea4116f57c5d5033569690871ba294dc3649ea5", "title": "Multi-View Face Alignment Using 3D Shape Model for View Estimation", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2009, "pdf": 
"http://pdfs.semanticscholar.org/4ea4/116f57c5d5033569690871ba294dc3649ea5.pdf"}, {"id": "7cffcb4f24343a924a8317d560202ba9ed26cd0b", "title": "The unconstrained ear recognition challenge", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}, {"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}, {"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06997.pdf"}, {"id": "185263189a30986e31566394680d6d16b0089772", "title": "Efficient Annotation of Objects for Video Analysis", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf"}, {"id": "5da827fe558fb2e1124dcc84ef08311241761726", "title": "Attribute preserved face de-identification", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139096"}, {"id": "1de23d7fe718d9fab0159f58f422099e44ad3f0a", "title": "Locality Preserving Collaborative Representation for Face Recognition", "addresses": [{"address": "Xiamen University", "lat": "24.43994190", "lng": "118.09301781", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s11063-016-9558-2"}, {"id": "4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e", "title": "Deep Density Clustering of Unconstrained Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf"}, {"id": "5bb53fb36a47b355e9a6962257dd465cd7ad6827", "title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays", "addresses": [{"address": "North Carolina Central University", "lat": "35.97320905", "lng": "-78.89755054", "type": "edu"}, {"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5bb5/3fb36a47b355e9a6962257dd465cd7ad6827.pdf"}, {"id": "b18858ad6ec88d8b443dffd3e944e653178bc28b", "title": "Trojaning Attack on Neural Networks", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf"}, {"id": "ee7093e91466b81d13f4d6933bcee48e4ee63a16", "title": "Discovering Person Identity via Large-Scale Observations", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ee70/93e91466b81d13f4d6933bcee48e4ee63a16.pdf"}, {"id": "687e17db5043661f8921fb86f215e9ca2264d4d2", "title": "A robust elastic and partial matching metric for face recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2009, "pdf": "http://www.ece.northwestern.edu/~ganghua/publication/ICCV09a.pdf"}, {"id": "fffe5ab3351deab81f7562d06764551422dbd9c4", "title": "Fully automated facial picture evaluation using high 
level attributes", "addresses": [{"address": "GIPSA-Lab, Grenoble, France", "lat": "45.19292450", "lng": "5.76619830", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163114"}, {"id": "1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3", "title": "Robust Face Recognition with Deep Multi-View Representation Learning", "addresses": [{"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2984061"}, {"id": "870433ba89d8cab1656e57ac78f1c26f4998edfb", "title": "Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163"}, {"id": "e3a6e5a573619a97bd6662b652ea7d088ec0b352", "title": "Compare and Contrast: Learning Prominent Visual Differences", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.00112.pdf"}, {"id": "60737db62fb5fab742371709485e4b2ddf64b7b2", "title": "Crowdsourced Selection on Multi-Attribute Data", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3132847.3132891"}, {"id": "288964068cd87d97a98b8bc927d6e0d2349458a2", "title": "Mean-Variance Loss for Deep Age Estimation from a Face", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf"}, {"id": "59d45281707b85a33d6f50c6ac6b148eedd71a25", "title": "Rank Minimization across Appearance and Shape for AAM Ensemble Fitting", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cheng_Rank_Minimization_across_2013_ICCV_paper.pdf"}, {"id": "818ecc8c8d4dc398b01a852df90cb8d972530fa5", "title": "Unsupervised Training for 3D Morphable Model Regression", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06098.pdf"}, {"id": "d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8", "title": "A Novel Two-stage Learning Pipeline for Deep Neural Networks", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11063-017-9578-6"}, {"id": "4b3f425274b0c2297d136f8833a31866db2f2aec", "title": "Toward Open-Set Face Recognition", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.85"}, {"id": "12003a7d65c4f98fb57587fd0e764b44d0d10125", "title": "Face 
recognition in the wild with the Probabilistic Gabor-Fisher Classifier", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284835"}, {"id": "7a09e8f65bd85d4c79f0ae90d4e2685869a9894f", "title": "Face and Hair Region Labeling Using Semi-Supervised Spectral Clustering-Based Multiple Segmentations", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}, {"address": "Korea Institute of Oriental Medicine", "lat": "36.39918400", "lng": "127.39465600", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TMM.2016.2551698"}, {"id": "2bcd9b2b78eb353ea57cf50387083900eae5384a", "title": "Image ranking and retrieval based on multi-attribute queries", "addresses": [{"address": "IBM Thomas J. Watson Research Center", "lat": "41.21002475", "lng": "-73.80407056", "type": "company"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995329"}, {"id": "77c5437107f8138d48cb7e10b2b286fa51473678", "title": "A pseudo ensemble convolutional neural networks", "addresses": [{"address": "Electronics and Telecommunications Research Institute, Daejeon, Korea", "lat": "36.38376500", "lng": "127.36694000", "type": "edu"}, {"address": "University of Science and Technology, Korea", "lat": "36.38513950", "lng": "127.36834130", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/URAI.2016.7734005"}, {"id": "872dfdeccf99bbbed7c8f1ea08afb2d713ebe085", "title": "L2-constrained Softmax Loss for Discriminative Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.09507.pdf"}, {"id": "57ca530e9acb63487e8591cb6efb89473aa1e5b4", "title": "Multilayer Surface Albedo for Face Recognition With Reference Images in Bad Lighting Conditions", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2356292"}, {"id": "a8748a79e8d37e395354ba7a8b3038468cb37e1f", "title": "Seeing the Forest from the Trees: A Holistic Approach to Near-Infrared Heterogeneous Face Recognition", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.47"}, {"id": "be28ed1be084385f5d389db25fd7f56cd2d7f7bf", "title": "Exploring computation-communication tradeoffs in camera systems", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.03864.pdf"}, {"id": "0e652a99761d2664f28f8931fee5b1d6b78c2a82", "title": "Making a Science of Model Search", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/0e65/2a99761d2664f28f8931fee5b1d6b78c2a82.pdf"}, {"id": "6604fd47f92ce66dd0c669dd66b347b80e17ebc9", "title": "Simultaneous Cascaded Regression", "addresses": [{"address": "Institute of Systems and Robotics", "lat": "53.83383710", 
"lng": "10.70359390", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/6604/fd47f92ce66dd0c669dd66b347b80e17ebc9.pdf"}, {"id": "46e72046a9bb2d4982d60bcf5c63dbc622717f0f", "title": "Learning Discriminative Features with Class Encoder", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1605.02424.pdf"}, {"id": "2a92bda6dbd5cce5894f7d370d798c07fa8783f4", "title": "Class-Specific Kernel Fusion of Multiple Descriptors for Face Verification Using Multiscale Binarised Statistical Image Features", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIFS.2014.2359587"}, {"id": "52bf00df3b970e017e4e2f8079202460f1c0e1bd", "title": "Learning High-level Prior with Convolutional Neural Networks for Semantic Segmentation", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/52bf/00df3b970e017e4e2f8079202460f1c0e1bd.pdf"}, {"id": "0081e2188c8f34fcea3e23c49fb3e17883b33551", "title": "Training Deep Face Recognition Systems with Synthetic Data", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf"}, {"id": "2201f187a7483982c2e8e2585ad9907c5e66671d", "title": "Joint Face Alignment and 3D Face Reconstruction", "addresses": [{"address": "Sichuan University, Chengdu", "lat": "30.64276900", "lng": "104.06751175", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1cad/9aa5095733b56e998ad0cd396e89c2bc9928.pdf"}, {"id": "4bd3de97b256b96556d19a5db71dda519934fd53", "title": "Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition", "addresses": [{"address": "South China University of Technology", "lat": "23.05020420", "lng": "113.39880323", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529"}, {"id": "831d661d657d97a07894da8639a048c430c5536d", "title": "Weakly Supervised Facial Analysis with Dense Hyper-Column Features", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.19"}, {"id": "0d746111135c2e7f91443869003d05cde3044beb", "title": "Partial face detection for continuous authentication", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532908"}, {"id": "3bd10f7603c4f5a4737c5613722124787d0dd818", 
"title": "An Efficient Joint Formulation for Bayesian Face Verification", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415949"}, {"id": "63a6c256ec2cf2e0e0c9a43a085f5bc94af84265", "title": "Complexity of multiverse networks and their multilayer generalization", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899662"}, {"id": "38a2661b6b995a3c4d69e7d5160b7596f89ce0e6", "title": "Randomized Intraclass-Distance Minimizing Binary Codes for face recognition", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2014, "pdf": "http://www.cs.colostate.edu/~draper/papers/zhang_ijcb14.pdf"}, {"id": "aa1129780cc496918085cd0603a774345c353c54", "title": "Evolutionary Cost-Sensitive Discriminative Learning With Application to Vision and Olfaction", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}, {"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7779010"}, {"id": "0de91641f37b0a81a892e4c914b46d05d33fd36e", "title": "RAPS: Robust and Efficient Automatic Construction of Person-Specific Deformable Models", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2014, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/raps.pdf"}, {"id": "2e091b311ac48c18aaedbb5117e94213f1dbb529", "title": "Collaborative Facial Landmark Localization for Transferring Annotations Across Datasets", "addresses": [{"address": "University of Wisconsin Madison", "lat": "43.07982815", "lng": "-89.43066425", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b1a1/a049f1d78f6e3d072236237c467292ccd537.pdf"}, {"id": "00075519a794ea546b2ca3ca105e2f65e2f5f471", "title": "Generating a Large, Freely-Available Dataset for Face-Related Algorithms", "addresses": [{"address": "Amherst College", "lat": "42.37289000", "lng": "-72.51881400", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/0007/5519a794ea546b2ca3ca105e2f65e2f5f471.pdf"}, {"id": "d6791b98353aa113d79f6fb96335aa6c7ea3b759", "title": "Collaborative Random Faces-Guided Encoders for Pose-Invariant Face Representation Learning", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}, {"address": "University of Massachusetts Dartmouth", "lat": "41.62772475", "lng": "-71.00724501", "type": "edu"}, {"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TNNLS.2017.2648122"}, {"id": "7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794", "title": "Markov Chain Monte Carlo for Automated Face Image Analysis", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s11263-016-0967-5"}, {"id": 
"1860b8f63ce501bd0dfa9e6f2debc080e88d9baa", "title": "Local Large-Margin Multi-Metric Learning for Face and Kinship Verification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7894195"}, {"id": "7a131fafa7058fb75fdca32d0529bc7cb50429bd", "title": "Beyond Face Rotation: Global and Local Perception GAN for Photorealistic and Identity Preserving Frontal View Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.04086.pdf"}, {"id": "2cd7821fcf5fae53a185624f7eeda007434ae037", "title": "Exploring the geo-dependence of human face appearance", "addresses": [{"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": 2014, "pdf": "http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf"}, {"id": "270acff7916589a6cc9ca915b0012ffcb75d4899", "title": "On the Applications of Robust PCA in Image and Video Processing", "addresses": [{"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}, {"address": "University of Warwick", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu"}, {"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8425659"}, {"id": "588bed36b3cc9e2f26c39b5d99d6687f36ae1177", "title": "Sparsely Encoded Local Descriptor for face recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Chinese Academy of Science", "lat": "39.90419990", "lng": "116.40739630", "type": "edu"}], "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389"}, {"id": "8bdf6f03bde08c424c214188b35be8b2dec7cdea", "title": "Inference Attacks Against Collaborative Learning", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04049.pdf"}, {"id": "09b43b59879d59493df2a93c216746f2cf50f4ac", "title": "Deep Transfer Metric Learning", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_036_ext.pdf"}, {"id": "6342a4c54835c1e14159495373ab18b4233d2d9b", "title": "Towards Pose-robust Face Recognition on Video", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf"}, {"id": "34108098e1a378bc15a5824812bdf2229b938678", "title": "Reconstructive Sparse Code Transfer for Contour Detection and Semantic Labeling", "addresses": [{"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3410/8098e1a378bc15a5824812bdf2229b938678.pdf"}, {"id": 
"1fd6004345245daf101c98935387e6ef651cbb55", "title": "Learning Symmetry Features for Face Detection Based on Sparse Group Lasso", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1fd6/004345245daf101c98935387e6ef651cbb55.pdf"}, {"id": "5dc52c64991c655a12936867594326cf6352eb8e", "title": "Constructing Local Binary Pattern Statistics by Soft Voting", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/5dc5/2c64991c655a12936867594326cf6352eb8e.pdf"}, {"id": "2c424f21607ff6c92e640bfe3da9ff105c08fac4", "title": "Learning Structured Output Representation using Deep Conditional Generative Models", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3f25/e17eb717e5894e0404ea634451332f85d287.pdf"}, {"id": "241d2c517dbc0e22d7b8698e06ace67de5f26fdf", "title": "Online, Real-Time Tracking Using a Category-to-Individual Detector", "addresses": [{"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/bfc3/546fa119443fdcbac3a5723647c2ba0007ac.pdf"}, {"id": "13901473a12061f080b9d54219f16db7d406e769", "title": "High-Order Local Spatial Context Modeling by Spatialized Random Forest", "addresses": [{"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2012.2222895"}, {"id": "3f5e8f884e71310d7d5571bd98e5a049b8175075", "title": "Making a Science of Model Search: Hyperparameter Optimization in Hundreds of Dimensions for Vision Architectures", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/3f5e/8f884e71310d7d5571bd98e5a049b8175075.pdf"}, {"id": "adaed4e92c93eb005198e41f87cf079e46050b5a", "title": "Discriminative Invariant Kernel Features: A Bells-and-Whistles-Free Approach to Unsupervised Face Recognition and Pose Estimation", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pal_Discriminative_Invariant_Kernel_CVPR_2016_paper.pdf"}, {"id": "a2b4a6c6b32900a066d0257ae6d4526db872afe2", "title": "Learning Face Image Quality From Human Assessments", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466"}, {"id": "99facca6fc50cc30f13b7b6dd49ace24bc94f702", "title": "VIPLFaceNet: an open source deep face recognition SDK", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": 
"116.24585270", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.03892.pdf"}, {"id": "d0dd1364411a130448517ba532728d5c2fe78ed9", "title": "On-line machine learning accelerator on digital RRAM-crossbar", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ISCAS.2016.7527183"}, {"id": "8c66378df977606d332fc3b0047989e890a6ac76", "title": "Hierarchical-PEP model for real-world face recognition", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_078_ext.pdf"}, {"id": "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "title": "Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches", "addresses": [{"address": "Aberystwyth University", "lat": "52.41073580", "lng": "-4.05295501", "type": "edu"}, {"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf"}, {"id": "353b6c1f431feac6edde12b2dde7e6e702455abd", "title": "Multi-scale Patch Based Collaborative Representation for Face Recognition with Margin Distribution Optimization", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}, {"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8835/c80f8ad8ebd05771a9bce5a8637efbc4c8e3.pdf"}, {"id": "4377b03bbee1f2cf99950019a8d4111f8de9c34a", "title": "Selective Encoding for Recognizing Unreliably Localized Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://www.umiacs.umd.edu/~morariu/publications/LiSelectiveEncoderICCV15.pdf"}, {"id": "120bcc9879d953de7b2ecfbcd301f72f3a96fb87", "title": "Report on the FG 2015 Video Person Recognition Evaluation", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2015, "pdf": "http://www.cs.colostate.edu/~vision/pasc/docs/fg2015videoEvalPreprint.pdf"}, {"id": "e00d4e4ba25fff3583b180db078ef962bf7d6824", "title": "Face Verification with Multi-Task and Multi-Scale Features Fusion", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": 
"edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e00d/4e4ba25fff3583b180db078ef962bf7d6824.pdf"}, {"id": "5bde1718253ec28a753a892b0ba82d8e553b6bf3", "title": "Variational Relevance Vector Machine for Tabular Data", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}, {"address": "Lomonosov Moscow State University", "lat": "55.70229715", "lng": "37.53179777", "type": "edu"}, {"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/5bde/1718253ec28a753a892b0ba82d8e553b6bf3.pdf"}, {"id": "4349f17ec319ac8b25c14c2ec8c35f374b958066", "title": "Dynamic Texture Comparison Using Derivative Sparse Representation: Application to Video-Based Face Recognition", "addresses": [{"address": "Tafresh University", "lat": "34.68092465", "lng": "50.05341352", "type": "edu"}, {"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}, {"address": "Griffith University", "lat": "-27.55339750", "lng": "153.05336234", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/THMS.2017.2681425"}, {"id": "b859d1fc1a7ad756815490527319d458fa9af3d2", "title": "Learning Structure and Strength of CNN Filters for Small Sample Size Training", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11405.pdf"}, {"id": "122f52fadd4854cf6c9287013520eced3c91e71a", "title": "Robust Point Set Matching for Partial Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2515987"}, {"id": "771505abd38641454757de75fe751d41e87f89a4", "title": "Learning structured sparse representation for single sample face recognition", "addresses": [{"address": "HoHai University", "lat": "32.05765485", "lng": "118.75500040", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Nantong University", "lat": "31.97474630", "lng": "120.90779264", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401561"}, {"id": "ac48ecbc7c3c1a7eab08820845d47d6ce197707c", "title": "Iterative Re-Constrained Group Sparse Face Recognition With Adaptive Weights Learning", "addresses": [{"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2681841"}, {"id": "09f58353e48780c707cf24a0074e4d353da18934", "title": "Unconstrained face recognition: Establishing baseline human performance via crowdsourcing", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/BestrowdenBishtKlontzJain_CrowdsourcingHumanPeformance_IJCB2014.pdf"}, {"id": "0106a2f6251dc9ffc90709c6f0d9b54c1e82326b", "title": "Applying scattering operators for face recognition: A 
comparative study", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}, {"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2012, "pdf": "http://www.iis.sinica.edu.tw/papers/song/14922-A.pdf"}, {"id": "0ad8149318912b5449085187eb3521786a37bc78", "title": "CP-mtML: Coupled Projection Multi-Task Metric Learning for Large Scale Face Retrieval", "addresses": [{"address": "University of Caen", "lat": "35.02749960", "lng": "135.78154513", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/abs/1604.02975"}, {"id": "bc27434e376db89fe0e6ef2d2fabc100d2575ec6", "title": "Faceless Person Recognition; Privacy Implications in Social Media", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1607.08438.pdf"}, {"id": "83295bce2340cb87901499cff492ae6ff3365475", "title": "Deep Multi-Center Learning for Face Alignment", "addresses": [{"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}, {"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.01558.pdf"}, {"id": "5c4f9260762a450892856b189df240f25b5ed333", "title": "Discriminative Elastic-Net Regularized Linear Regression", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}, {"address": "University of East Anglia", "lat": "52.62215710", "lng": "1.24091360", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2651396"}, {"id": "57ebeff9273dea933e2a75c306849baf43081a8c", "title": "Deep Convolutional Network Cascade for Facial Point Detection", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sun_Deep_Convolutional_Network_2013_CVPR_paper.pdf"}, {"id": "9ce97efc1d520dadaa0d114192ca789f23442727", "title": "Teaching Computer Vision: Bringing Research Benchmarks to the Classroom", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2597627"}, {"id": "4793f11fbca4a7dba898b9fff68f70d868e2497c", "title": "Kinship Verification through Transfer Learning", "addresses": [{"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf"}, {"id": "08d55271589f989d90a7edce3345f78f2468a7e0", "title": "Quality Aware Network for Set to Set Recognition", "addresses": [{"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1704.03373v1.pdf"}, {"id": "3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c", "title": "Is block matching an alternative tool to LBP for face recognition?", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], 
"year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025145"}, {"id": "651cafb2620ab60a0e4f550c080231f20ae6d26e", "title": "4D unconstrained real-time face recognition using a commodity depth camera", "addresses": [{"address": "Singapore University of Technology and Design", "lat": "1.34021600", "lng": "103.96508900", "type": "edu"}, {"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6360717"}, {"id": "56e25358ebfaf8a8b3c7c33ed007e24f026065d0", "title": "V-shaped interval insensitive loss for ordinal classification", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1007/s10994-015-5541-9"}, {"id": "ce37e11f4046a4b766b0e3228870ae4f26dddd67", "title": "Learning One-Shot Exemplar SVM from the Web for Face Verification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ce37/e11f4046a4b766b0e3228870ae4f26dddd67.pdf"}, {"id": "ab0d227b63b702ba80f70fd053175cd1b2fd28cc", "title": "Boosting Pseudo Census Transform Features for Face Alignment", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/0eed/cda8981740ae2c34ad5809dbdfcd817f2518.pdf"}, {"id": "4c170a0dcc8de75587dae21ca508dab2f9343974", "title": "FaceTracer: A Search Engine for Large Collections of Images with Faces", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf"}, {"id": "65f0b05052c3145a58c2653821e5429ca62555ce", "title": "Attacks Meet Interpretability: Attribute-steered Detection of Adversarial Samples", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11580.pdf"}, {"id": "edfce091688bc88389dd4877950bd58e00ff1253", "title": "A talking profile to distinguish identical twins", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553700"}, {"id": "1e3068886b138304ec5a7296702879cc8788143d", "title": "Active Rare Class Discovery and Classification Using Dirichlet Processes", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}], "year": "2013", "pdf": "http://doi.org/10.1007/s11263-013-0630-3"}, {"id": "566a39d753c494f57b4464d6bde61bf3593f7ceb", "title": "A Critical Review of Action Recognition Benchmarks", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.43"}, {"id": "16fadde3e68bba301f9829b3f99157191106bd0f", "title": "Utility data annotation with Amazon Mechanical Turk", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2008", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4562953"}, {"id": "bd379f8e08f88729a9214260e05967f4ca66cd65", "title": "Learning Compositional Visual Concepts with Mutual Consistency", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.06148.pdf"}, {"id": "2ff9ffedfc59422a8c7dac418a02d1415eec92f1", "title": "Face Verification Using Boosted Cross-Image Features", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}, {"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6e3b/778ad384101f792284b42844518f620143aa.pdf"}, {"id": "e22adcd2a6a7544f017ec875ce8f89d5c59e09c8", "title": "Gender Privacy: An Ensemble of Semi Adversarial Networks for Confounding Arbitrary Gender Classifiers", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11936.pdf"}, {"id": "22e678d3e915218a7c09af0d1602e73080658bb7", "title": "Adventures in archiving and using three years of webcam images", "addresses": [{"address": "Washington University", "lat": "38.64804450", "lng": "-90.30996670", "type": "edu"}], "year": 2009, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/04/13.pdf"}, {"id": "c3c463a9ee464bb610423b7203300a83a166b500", "title": "Transform-invariant dictionary learning for face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025069"}, {"id": "b75eecc879da38138bf3ace9195ae1613fb6e3cc", "title": "Improvement in Detection of Wrong-Patient Errors When Radiologists Include Patient Photographs in Their Interpretation of Portable Chest Radiographs", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1007/s10278-015-9808-2"}, {"id": "2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc", "title": "Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf"}, {"id": "919bdc161485615d5ee571b1585c1eb0539822c8", "title": "A ranking model for face alignment with Pseudo Census Transform", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2012, "pdf": "http://ieeexplore.ieee.org/document/6460332/"}, {"id": "a3a2f3803bf403262b56ce88d130af15e984fff0", "title": "Building a Compact Relevant Sample Coverage for Relevance Feedback in Content-Based Image Retrieval", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/e538/e1f6557d2920b449249606f909b665fbb924.pdf"}, {"id": "a192845a7695bdb372cccf008e6590a14ed82761", "title": "A Novel Local Pattern Descriptor—Local Vector Pattern in High-Order Derivative Space for Face Recognition", "addresses": [{"address": "National Central University", "lat": "24.96841805", 
"lng": "121.19139696", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2321495"}, {"id": "8db9188e5137e167bffb3ee974732c1fe5f7a7dc", "title": "Tree-Structured Nuclear Norm Approximation With Applications to Robust Face Recognition", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2612885"}, {"id": "21e158bcda4e10da88ee8da3799a6144b60d791f", "title": "Population Matching Discrepancy and Applications in Deep Learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/21e1/58bcda4e10da88ee8da3799a6144b60d791f.pdf"}, {"id": "2b7ef95822a4d577021df16607bf7b4a4514eb4b", "title": "Emergence of Object-Selective Features in Unsupervised Feature Learning", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b596/9178f843bfaecd0026d04c41e79bcb9edab5.pdf"}, {"id": "3fe4109ded039ac9d58eb9f5baa5327af30ad8b6", "title": "Spatio-Temporal GrabCut human segmentation for face and pose recovery", "addresses": [{"address": "University of Barcelona", "lat": "41.38689130", "lng": "2.16352385", "type": "edu"}], "year": 2010, "pdf": "http://www.cvc.uab.cat/~ahernandez/files/CVPR2010STGRABCUT.pdf"}, {"id": "28be652db01273289499bc6e56379ca0237506c0", "title": "FaLRR: A fast low rank representation solver", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_018_ext.pdf"}, {"id": "5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c", "title": "SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks", "addresses": [{"address": "Philipps-Universit\u00e4t Marburg", "lat": "50.81427010", "lng": "8.77143500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICT.2017.7998256"}, {"id": "1a40c2a2d17c52c8b9d20648647d0886e30a60fa", "title": "Hybrid hypergraph construction for facial expression recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900283"}, {"id": "70580ed8bc482cad66e059e838e4a779081d1648", "title": "Gender Classification using Multi-Level Wavelets on Real World Face Images", "addresses": [{"address": "Shaheed Zulfikar Ali Bhutto Institute of Science and Technology", "lat": "24.81865870", "lng": "67.03165850", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/7058/0ed8bc482cad66e059e838e4a779081d1648.pdf"}, {"id": "9fc04a13eef99851136eadff52e98eb9caac919d", "title": "Rethinking the Camera Pipeline for Computer Vision", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9fc0/4a13eef99851136eadff52e98eb9caac919d.pdf"}, {"id": "0ca66283f4fb7dbc682f789fcf6d6732006befd5", "title": "Active Dictionary Learning for Image Representation", "addresses": [{"address": "State University of New Jersey", 
"lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/0ca6/6283f4fb7dbc682f789fcf6d6732006befd5.pdf"}, {"id": "761304bbd259a9e419a2518193e1ff1face9fd2d", "title": "Robust and Computationally Efficient Face Detection Using Gaussian Derivative Features of Higher Orders", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1007/978-3-642-33885-4_57"}, {"id": "f5af3c28b290dc797c499283e2d0662570f9ed02", "title": "GenLR-Net : Deep framework for very low resolution face and object recognition with generalization to unseen categories", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f5af/3c28b290dc797c499283e2d0662570f9ed02.pdf"}, {"id": "a0d6390dd28d802152f207940c7716fe5fae8760", "title": "Bayesian Face Revisited: A Joint Formulation", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf"}, {"id": "54ba18952fe36c9be9f2ab11faecd43d123b389b", "title": "Triangular similarity metric learning for face verification", "addresses": [{"address": "University of Lyon", "lat": "45.78332440", "lng": "4.87819840", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163085"}, {"id": "a2bd81be79edfa8dcfde79173b0a895682d62329", "title": "Multi-Objective Vehicle Routing Problem Applied to Large Scale Post Office Deliveries", "addresses": [{"address": "University of Campinas", "lat": "-27.59539950", "lng": "-48.61542180", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a2bd/81be79edfa8dcfde79173b0a895682d62329.pdf"}, {"id": "4cfd770ccecae1c0b4248bc800d7fd35c817bbbd", "title": "A Discriminative Feature Learning Approach for Deep Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf"}, {"id": "69b2a7533e38c2c8c9a0891a728abb423ad2c7e7", "title": "Manifold based sparse representation for facial understanding in natural images", "addresses": [{"address": "Rochester Institute of Technology", "lat": "43.08250655", "lng": "-77.67121663", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1016/j.imavis.2013.03.003"}, {"id": "0df0d1adea39a5bef318b74faa37de7f3e00b452", "title": "Appearance-based gaze estimation in the wild", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2015, "pdf": "https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf"}, {"id": "633101e794d7b80f55f466fd2941ea24595e10e6", "title": "Face Attribute Prediction with classification CNN", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": "2016", "pdf": 
"https://pdfs.semanticscholar.org/6331/01e794d7b80f55f466fd2941ea24595e10e6.pdf"}, {"id": "089b5e8eb549723020b908e8eb19479ba39812f5", "title": "A Cross Benchmark Assessment of a Deep Convolutional Neural Network for Face Recognition", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2017, "pdf": "http://www.face-recognition-challenge.com/RobustnessOfDCNN-preprint.pdf"}, {"id": "f27fd2a1bc229c773238f1912db94991b8bf389a", "title": "How do you develop a face detector for the unconstrained environment?", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IVCNZ.2016.7804414"}, {"id": "afdf9a3464c3b015f040982750f6b41c048706f5", "title": "A Recurrent Encoder-Decoder Network for Sequential Face Alignment", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1608.05477.pdf"}, {"id": "a52a69bf304d49fba6eac6a73c5169834c77042d", "title": "Margin Loss: Making Faces More Separable", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/LSP.2017.2789251"}, {"id": "bc910ca355277359130da841a589a36446616262", "title": "Conditional High-Order Boltzmann Machine: A Supervised Learning Model for Relation Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf"}, {"id": "93eb3963bc20e28af26c53ef3bce1e76b15e3209", "title": "Occlusion robust face recognition based on mask learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296992"}, {"id": "a06b6d30e2b31dc600f622ab15afe5e2929581a7", "title": "Robust Joint and Individual Variance Explained", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf"}, {"id": "565f7c767e6b150ebda491e04e6b1de759fda2d4", "title": "Fine-grained face verification: FGLFW database, baselines, and human-DCMN partnership", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2016.11.023"}, {"id": "16b9d258547f1eccdb32111c9f45e2e4bbee79af", "title": "NormFace: L2 Hypersphere Embedding for Face Verification", "addresses": [{"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.06369.pdf"}, {"id": "a73405038fdc0d8bf986539ef755a80ebd341e97", "title": "Conditional High-Order Boltzmann Machines for Supervised Relation Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2698918"}, {"id": 
"e9a5a38e7da3f0aa5d21499149536199f2e0e1f7", "title": "A Bayesian Scene-Prior-Based Deep Network Model for Face Verification", "addresses": [{"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf"}, {"id": "1fe121925668743762ce9f6e157081e087171f4c", "title": "Unsupervised learning of overcomplete face descriptors", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2015, "pdf": "https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf"}, {"id": "6f2b36cadf3dd1648b709e9b4f4c19ffa1939ed1", "title": "Striking the Right Balance with Uncertainty", "addresses": [{"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": "2019", "pdf": "https://arxiv.org/pdf/1901.07590.pdf"}, {"id": "84c7d3b1d407e0d435a08574a3f82ecacf7841b6", "title": "Max-margin Class Imbalanced Learning with Gaussian Affinity", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": "2019", "pdf": "https://arxiv.org/pdf/1901.07711.pdf"}, {"id": "0be418e63d111e3b94813875f75909e4dc27d13a", "title": "Fine-grained LFW database", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550057"}, {"id": "3c563542db664321aa77a9567c1601f425500f94", "title": "TV-GAN: Generative Adversarial Network Based Thermal to Visible Face Recognition", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1712.02514.pdf"}, {"id": "1bf570bd40b3adced1e47dbcceffe50573f81845", "title": "Exponential Discriminative Metric Embedding in Deep Learning", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.02504.pdf"}, {"id": "81884e1de00e59f24bc20254584d73a1a1806933", "title": "Super-Identity Convolutional Neural Network for Face Hallucination", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "SenseTime", "lat": "39.99300800", "lng": "116.32988200", "type": "company"}, {"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.02328.pdf"}, {"id": "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "title": "Pooling Faces: Template Based Face Recognition with Pooled Face Images", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1607.01450v1.pdf"}, {"id": "06c956d4aac65752672ce4bd5a379f10a7fd6148", "title": "Stacking PCANet +: An Overly Simplified ConvNets Baseline for Face Recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2749763"}, {"id": "740e095a65524d569244947f6eea3aefa3cca526", "title": "Towards Human-like Performance Face Detection: A Convolutional Neural Network Approach", "addresses": 
[{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/740e/095a65524d569244947f6eea3aefa3cca526.pdf"}, {"id": "2f16baddac6af536451b3216b02d3480fc361ef4", "title": "Web-scale training for face identification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2015, "pdf": "http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf"}, {"id": "32d8e555441c47fc27249940991f80502cb70bd5", "title": "Machine Learning Models that Remember Too Much", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1709.07886v1.pdf"}, {"id": "7859667ed6c05a467dfc8a322ecd0f5e2337db56", "title": "Web-Scale Transfer Learning for Unconstrained 1:N Face Identification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf"}, {"id": "7788fa76f1488b1597ee2bebc462f628e659f61e", "title": "A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds", "addresses": [{"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888"}, {"id": "f467eb5e7f9d49ab318401f961109882c00f2720", "title": "Face/Off: Preventing Privacy Leakage From Photos in Social Networks", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2015, "pdf": "http://www.nsl.cs.columbia.edu/papers/2015/faceoff.ccs15.pdf"}, {"id": "84ae55603bffda40c225fe93029d39f04793e01f", "title": "ICB-RW 2016: International challenge on biometric recognition in the wild", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": "-7.50958010", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550066"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "b2e308649c7a502456a8e3c95ac7fbe6f8216e51", "title": "Recurrent Regression for Face Recognition", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b2e3/08649c7a502456a8e3c95ac7fbe6f8216e51.pdf"}, {"id": "5e48958c1c9ab9ccb5c9e1a62b81532700d38d83", "title": "ArtGAN: Artwork synthesis with conditional categorical GANs", "addresses": [{"address": "University of Malaya", "lat": "3.12267405", "lng": "101.65356103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1702.03410.pdf"}, {"id": "5922e26c9eaaee92d1d70eae36275bb226ecdb2e", "title": "Boosting Classification Based Similarity Learning by using Standard Distances", "addresses": [{"address": "Universitat de Val\u00e8ncia", "lat": "39.47787665", "lng": "-0.34257711", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5922/e26c9eaaee92d1d70eae36275bb226ecdb2e.pdf"}, {"id": "345cc31c85e19cea9f8b8521be6a37937efd41c2", "title": "Deep Manifold Traversal: Changing Labels with 
Convolutional Features", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2015", "pdf": "https://arxiv.org/pdf/1511.06421.pdf"}, {"id": "72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e", "title": "Face Recognition with Contrastive Convolution", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf"}, {"id": "47cd161546c59ab1e05f8841b82e985f72e5ddcb", "title": "Gender classification in live videos", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296552"}, {"id": "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "title": "SphereFace: Deep Hypersphere Embedding for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.08063.pdf"}, {"id": "9ace71283834d4dd72509db6f1b859536f801d1c", "title": "Dynamic Deep Neural Networks: Optimizing Accuracy-Efficiency Trade-Offs by Selective Execution", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1701.00299.pdf"}, {"id": "e1630014a5ae3d2fb7ff6618f1470a567f4d90f5", "title": "Look, Listen and Learn - A Multimodal LSTM for Speaker Identification", "addresses": [{"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1602.04364.pdf"}, {"id": "344a5802999dddd0a6d1c4d511910af2eb922231", "title": "DroneFace: An Open Dataset for Drone Research", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f0ba/552418698d1b881c6f9f02e2c84f969e66f3.pdf"}, {"id": "600f164c81dbaa0327e7bd659fd9eb7f511f9e9a", "title": "A benchmark study of large-scale unconstrained face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/BTAS.2014.6996301"}, {"id": "5180df9d5eb26283fb737f491623395304d57497", "title": "Scalable Angular Discriminative Deep Metric Learning for Face Recognition", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10899.pdf"}, {"id": "55089f9bc858ae7e9addf30502ac11be4347c05a", "title": "A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/5508/9f9bc858ae7e9addf30502ac11be4347c05a.pdf"}, {"id": "37af037923d0d7a4480a9c1f2e7d002f122bfebb", "title": "Recent Progress of Face Image Synthesis", "addresses": 
[{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04717.pdf"}]}
\ No newline at end of file +{"id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "paper": {"paper_id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "key": "lfw", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf", "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "name": "LFW"}, "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "additional_papers": [{"paper_id": "370b5757a5379b15e30d619e4d3fb9e8e13f3256", "key": "lfw", "title": "Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf", "address": "", "name": "LFW"}, {"paper_id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "key": "lfw", "title": "Labeled Faces in the Wild: A Survey", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf", "address": "", "name": "LFW"}], "citations": [{"id": "afdf9a3464c3b015f040982750f6b41c048706f5", "title": "A Recurrent Encoder-Decoder Network for Sequential Face Alignment", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1608.05477.pdf"}, {"id": "a52a69bf304d49fba6eac6a73c5169834c77042d", "title": "Margin Loss: Making Faces More Separable", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/LSP.2017.2789251"}, {"id": "bc910ca355277359130da841a589a36446616262", "title": "Conditional High-Order Boltzmann Machine: A Supervised Learning Model for Relation Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf"}, {"id": "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf", "title": "A Multi-level Contextual Model for Person Recognition in Photo Albums", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2016, "pdf": "http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "c222f8079c246ead285894c47bdbb2dfc7741044", "title": "Face de-identification with expressions preservation", "addresses": [{"address": "Bordeaux INP, France", "lat": "44.80557160", "lng": "-0.60519720", "type": "edu"}, {"address": "University of Bordeaux, France", "lat": "44.80837500", "lng": "-0.59670500", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351631"}, {"id": "93eb3963bc20e28af26c53ef3bce1e76b15e3209", "title": "Occlusion robust face recognition based on mask learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": 
"https://doi.org/10.1109/ICIP.2017.8296992"}, {"id": "a06b6d30e2b31dc600f622ab15afe5e2929581a7", "title": "Robust Joint and Individual Variance Explained", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf"}, {"id": "565f7c767e6b150ebda491e04e6b1de759fda2d4", "title": "Fine-grained face verification: FGLFW database, baselines, and human-DCMN partnership", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2016.11.023"}, {"id": "16b9d258547f1eccdb32111c9f45e2e4bbee79af", "title": "NormFace: L2 Hypersphere Embedding for Face Verification", "addresses": [{"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.06369.pdf"}, {"id": "a73405038fdc0d8bf986539ef755a80ebd341e97", "title": "Conditional High-Order Boltzmann Machines for Supervised Relation Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2698918"}, {"id": "e9a5a38e7da3f0aa5d21499149536199f2e0e1f7", "title": "A Bayesian Scene-Prior-Based Deep Network Model for Face Verification", "addresses": [{"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf"}, {"id": "63a6c256ec2cf2e0e0c9a43a085f5bc94af84265", "title": "Complexity of multiverse networks and their multilayer generalization", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899662"}, {"id": "1fe121925668743762ce9f6e157081e087171f4c", "title": "Unsupervised learning of overcomplete face descriptors", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2015, "pdf": "https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf"}, {"id": "6f2b36cadf3dd1648b709e9b4f4c19ffa1939ed1", "title": "Striking the Right Balance with Uncertainty", "addresses": [{"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": "2019", "pdf": "https://arxiv.org/pdf/1901.07590.pdf"}, {"id": "84c7d3b1d407e0d435a08574a3f82ecacf7841b6", "title": "Max-margin Class Imbalanced Learning with Gaussian Affinity", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": "2019", "pdf": "https://arxiv.org/pdf/1901.07711.pdf"}, {"id": "0be418e63d111e3b94813875f75909e4dc27d13a", "title": "Fine-grained LFW database", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550057"}, {"id": "3c563542db664321aa77a9567c1601f425500f94", "title": "TV-GAN: Generative Adversarial Network Based Thermal to Visible Face Recognition", 
"addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1712.02514.pdf"}, {"id": "4cfd770ccecae1c0b4248bc800d7fd35c817bbbd", "title": "A Discriminative Feature Learning Approach for Deep Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf"}, {"id": "1bf570bd40b3adced1e47dbcceffe50573f81845", "title": "Exponential Discriminative Metric Embedding in Deep Learning", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.02504.pdf"}, {"id": "81884e1de00e59f24bc20254584d73a1a1806933", "title": "Super-Identity Convolutional Neural Network for Face Hallucination", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "SenseTime", "lat": "39.99300800", "lng": "116.32988200", "type": "company"}, {"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.02328.pdf"}, {"id": "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "title": "Pooling Faces: Template Based Face Recognition with Pooled Face Images", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1607.01450v1.pdf"}, {"id": "06c956d4aac65752672ce4bd5a379f10a7fd6148", "title": "Stacking PCANet +: An Overly Simplified ConvNets Baseline for Face Recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2749763"}, {"id": "740e095a65524d569244947f6eea3aefa3cca526", "title": "Towards Human-like Performance Face Detection: A Convolutional Neural Network Approach", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/740e/095a65524d569244947f6eea3aefa3cca526.pdf"}, {"id": "8c66378df977606d332fc3b0047989e890a6ac76", "title": "Hierarchical-PEP model for real-world face recognition", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_078_ext.pdf"}, {"id": "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "title": "Effective 3D based frontalization for unconstrained face recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774"}, {"id": "2f16baddac6af536451b3216b02d3480fc361ef4", "title": "Web-scale training for face identification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2015, "pdf": "http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf"}, {"id": "32d8e555441c47fc27249940991f80502cb70bd5", "title": "Machine Learning Models that Remember Too Much", 
"addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1709.07886v1.pdf"}, {"id": "7859667ed6c05a467dfc8a322ecd0f5e2337db56", "title": "Web-Scale Transfer Learning for Unconstrained 1:N Face Identification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf"}, {"id": "7788fa76f1488b1597ee2bebc462f628e659f61e", "title": "A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds", "addresses": [{"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888"}, {"id": "f467eb5e7f9d49ab318401f961109882c00f2720", "title": "Face/Off: Preventing Privacy Leakage From Photos in Social Networks", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2015, "pdf": "http://www.nsl.cs.columbia.edu/papers/2015/faceoff.ccs15.pdf"}, {"id": "84ae55603bffda40c225fe93029d39f04793e01f", "title": "ICB-RW 2016: International challenge on biometric recognition in the wild", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": "-7.50958010", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550066"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "f92ade569cbe54344ffd3bb25efd366dcd8ad659", "title": "Effect of Super Resolution on High Dimensional Features for Unsupervised Face Recognition in the Wild", "addresses": [{"address": "University of Bridgeport", "lat": "41.16648580", "lng": "-73.19205640", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.01464.pdf"}, {"id": "b2e308649c7a502456a8e3c95ac7fbe6f8216e51", "title": "Recurrent Regression for Face Recognition", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b2e3/08649c7a502456a8e3c95ac7fbe6f8216e51.pdf"}, {"id": "5e48958c1c9ab9ccb5c9e1a62b81532700d38d83", "title": "ArtGAN: Artwork synthesis with conditional categorical GANs", "addresses": [{"address": "University of Malaya", "lat": "3.12267405", "lng": "101.65356103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1702.03410.pdf"}, {"id": "5922e26c9eaaee92d1d70eae36275bb226ecdb2e", "title": "Boosting Classification Based Similarity Learning by using Standard Distances", "addresses": [{"address": "Universitat de Val\u00e8ncia", "lat": "39.47787665", "lng": "-0.34257711", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5922/e26c9eaaee92d1d70eae36275bb226ecdb2e.pdf"}, {"id": "86afb1e38a96f2ac00e792ef353a971fd13c8474", "title": "How interesting images are: An atypicality approach for social networks", "addresses": [{"address": "University of Hawaii", "lat": "21.29827950", "lng": "-157.81869230", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/BigData.2016.7840742"}, {"id": "345cc31c85e19cea9f8b8521be6a37937efd41c2", 
"title": "Deep Manifold Traversal: Changing Labels with Convolutional Features", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2015", "pdf": "https://arxiv.org/pdf/1511.06421.pdf"}, {"id": "72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e", "title": "Face Recognition with Contrastive Convolution", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "47cd161546c59ab1e05f8841b82e985f72e5ddcb", "title": "Gender classification in live videos", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296552"}, {"id": "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "title": "SphereFace: Deep Hypersphere Embedding for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.08063.pdf"}, {"id": "9ace71283834d4dd72509db6f1b859536f801d1c", "title": "Dynamic Deep Neural Networks: Optimizing Accuracy-Efficiency Trade-Offs by Selective Execution", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1701.00299.pdf"}, {"id": "e1630014a5ae3d2fb7ff6618f1470a567f4d90f5", "title": "Look, Listen and Learn - A Multimodal LSTM for Speaker Identification", "addresses": [{"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1602.04364.pdf"}, {"id": "344a5802999dddd0a6d1c4d511910af2eb922231", "title": "DroneFace: An Open Dataset for Drone Research", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f0ba/552418698d1b881c6f9f02e2c84f969e66f3.pdf"}, {"id": "600f164c81dbaa0327e7bd659fd9eb7f511f9e9a", "title": "A benchmark study of large-scale unconstrained face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/BTAS.2014.6996301"}, {"id": "5180df9d5eb26283fb737f491623395304d57497", "title": "Scalable Angular Discriminative Deep Metric Learning for Face Recognition", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10899.pdf"}, {"id": "adaed4e92c93eb005198e41f87cf079e46050b5a", "title": 
"Discriminative Invariant Kernel Features: A Bells-and-Whistles-Free Approach to Unsupervised Face Recognition and Pose Estimation", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pal_Discriminative_Invariant_Kernel_CVPR_2016_paper.pdf"}, {"id": "55089f9bc858ae7e9addf30502ac11be4347c05a", "title": "A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/5508/9f9bc858ae7e9addf30502ac11be4347c05a.pdf"}, {"id": "37af037923d0d7a4480a9c1f2e7d002f122bfebb", "title": "Recent Progress of Face Image Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04717.pdf"}, {"id": "10f66f6550d74b817a3fdcef7fdeba13ccdba51c", "title": "Benchmarking Face Alignment", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/10f6/6f6550d74b817a3fdcef7fdeba13ccdba51c.pdf"}, {"id": "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf"}, {"id": "44b827df6c433ca49bcf44f9f3ebfdc0774ee952", "title": "Deep Correlation Feature Learning for Face Verification in the Wild", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2726105"}, {"id": "a0b1990dd2b4cd87e4fd60912cc1552c34792770", "title": "Deep Constrained Local Models for Facial Landmark Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf"}, {"id": "3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0", "title": "Defeating Image Obfuscation with Deep Learning", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/73cc/fdedbd7d72a147925727ba1932f9488cfde3.pdf"}, {"id": "d3a3d15a32644beffaac4322b9f165ed51cfd99b", "title": "Eye detection by using deep learning", "addresses": [{"address": "Gebze Technical University, Turkey", "lat": "40.80805620", "lng": "29.35612020", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/SIU.2016.7496197"}, {"id": "b13bf657ca6d34d0df90e7ae739c94a7efc30dc3", "title": "Attribute and Simile Classifiers for Face Verification (In submission please do not distribute.)", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b13b/f657ca6d34d0df90e7ae739c94a7efc30dc3.pdf"}, {"id": "93af36da08bf99e68c9b0d36e141ed8154455ac2", "title": "A Dditive M Argin S Oftmax for F Ace V Erification", "addresses": [{"address": "Georgia 
Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf"}, {"id": "280bc9751593897091015aaf2cab39805768b463", "title": "Gender Perception From Faces Using Boosted LBPH (Local Binary Patten Histograms)", "addresses": [{"address": "COMSATS Institute of Information Technology, Lahore", "lat": "31.40063320", "lng": "74.21372960", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf"}, {"id": "e57ce6244ec696ff9aa42d6af7f09eed176153a8", "title": "Instantaneous real-time head pose at a distance", "addresses": [{"address": "Heriot-Watt University", "lat": "55.91029135", "lng": "-3.32345777", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351449"}, {"id": "bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5", "title": "Avatar recommendation method based on facial attributes", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/VSMM.2014.7136653"}, {"id": "9821669a989a3df9d598c1b4332d17ae8e35e294", "title": "Minimal Correlation Classification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9821/669a989a3df9d598c1b4332d17ae8e35e294.pdf"}, {"id": "04661729f0ff6afe4b4d6223f18d0da1d479accf", "title": "From Facial Parts Responses to Face Detection: A Deep Learning Approach", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.419"}, {"id": "1d5aad4f7fae6d414ffb212cec1f7ac876de48bf", "title": "Face retriever: Pre-filtering the gallery via deep neural net", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139112"}, {"id": "03babadaaa7e71d4b65203e27e8957db649155c6", "title": "Distance Metric Learning via Iterated Support Vector Machines", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Xi\u2019an Jiaotong University", "lat": "34.25080300", "lng": "108.98369300", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Educational Testing Service", "lat": "40.34946320", "lng": "-74.71481500", "type": "company"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2725578"}, {"id": "20eabf10e9591443de95b726d90cda8efa7e53bb", "title": "Discriminative Histogram Intersection Metric Learning and Its Applications", "addresses": [{"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}, {"address": "Waseda University", "lat": "33.88987280", "lng": "130.70856205", "type": "edu"}], "year": 2017, "pdf": 
"https://doi.org/10.1007/s11390-017-1740-0"}, {"id": "0cdb49142f742f5edb293eb9261f8243aee36e12", "title": "Combined Learning of Salient Local Descriptors and Distance Metrics for Image Set Face Verification", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2012, "pdf": "http://arxiv.org/abs/1303.2783"}, {"id": "24b637c98b22cd932f74acfeecdb50533abea9ae", "title": "Robust Face Recognition via Minimum Error Entropy-Based Atomic Representation", "addresses": [{"address": "Hubei University", "lat": "30.48176100", "lng": "114.31096000", "type": "edu"}, {"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TIP.2015.2492819"}, {"id": "9d66de2a59ec20ca00a618481498a5320ad38481", "title": "POP: Privacy-Preserving Outsourced Photo Sharing and Searching for Mobile Devices", "addresses": [{"address": "Illinois Institute of Technology", "lat": "41.83619630", "lng": "-87.62655913", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://www.cs.iit.edu/~xli/paper/Conf/POP-ICDCS15.pdf"}, {"id": "2a98b850139b911df5a336d6ebf33be7819ae122", "title": "Maximum entropy regularized group collaborative representation for face recognition", "addresses": [{"address": "Georgia Southern University", "lat": "32.42143805", "lng": "-81.78450529", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7350806"}, {"id": "14b016c7a87d142f4b9a0e6dc470dcfc073af517", "title": "Modest proposals for improving biometric recognition papers", "addresses": [{"address": "San Jose State University", "lat": "37.33519080", "lng": "-121.88126008", "type": "edu"}], "year": 2015, "pdf": "http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912"}, {"id": "0aeb5020003e0c89219031b51bd30ff1bceea363", "title": "Sparsifying Neural Network Connections for Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525"}, {"id": "3ff79cf6df1937949cc9bc522041a9a39d314d83", "title": "Adversarial examples: A survey", "addresses": [{"address": "Warsaw University of Technology", "lat": "52.22165395", "lng": "21.00735776", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8406730"}, {"id": "676f9eabf4cfc1fd625228c83ff72f6499c67926", "title": "Face Identification and Clustering", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/676f/9eabf4cfc1fd625228c83ff72f6499c67926.pdf"}, {"id": "9ed4ad41cbad645e7109e146ef6df73f774cd75d", "title": "RPM: Random Points Matching for Pair wise Face-Similarity", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}, {"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/a83e/175ad5b2066e207f5d2ec830ae05bac266b9.pdf"}, {"id": 
"33ad23377eaead8955ed1c2b087a5e536fecf44e", "title": "Augmenting CRFs with Boltzmann Machine Shape Priors for Image Labeling", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": 2013, "pdf": "http://vis-www.cs.umass.edu/papers/gloc_cvpr13.pdf"}, {"id": "e8aa1f207b4b0bb710f79ab47a671d5639696a56", "title": "Exploiting symmetry in two-dimensional clustering-based discriminant analysis for face recognition", "addresses": [{"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7362364"}, {"id": "adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6", "title": "Two Birds, One Stone: Jointly Learning Binary Code for Large-Scale Face Image Retrieval and Attributes Prediction", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.435"}, {"id": "fe961cbe4be0a35becd2d722f9f364ec3c26bd34", "title": "Computer-based Tracking, Analysis, and Visualization of Linguistically Significant Nonmanual Events in American Sign Language (ASL)", "addresses": [{"address": "Boston University", "lat": "42.35042530", "lng": "-71.10056114", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/fe96/1cbe4be0a35becd2d722f9f364ec3c26bd34.pdf"}, {"id": "b98e7a8f605c21e25ac5e32bfb1851a01f30081b", "title": "Deep nonlinear metric learning with independent subspace analysis for face verification", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://doi.acm.org/10.1145/2393347.2396303"}, {"id": "3d89f9b4da3d6fb1fdb33dea7592b5992069a096", "title": "Face recognition based on convolution siamese networks", "addresses": [{"address": "University of the Chinese Academy of Sciences", "lat": "30.57281500", "lng": "104.06680100", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/CISP-BMEI.2017.8302003"}, {"id": "e51927b125640bfc47bbf1aa00c3c026748c75bd", "title": "Automatic Facial Image Annotation and Retrieval by Integrating Voice Label and Visual Appearance", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2647868.2655015"}, {"id": "4e8c608fc4b8198f13f8a68b9c1a0780f6f50105", "title": "How Related Exemplars Help Complex Event Detection in Web Videos?", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Yang_How_Related_Exemplars_2013_ICCV_paper.pdf"}, {"id": "ce8db0fe11e7c96d08de561506f9f8f399dabbb2", "title": "Weighted sparse representation using a learned distance metric for face recognition", 
"addresses": [{"address": "Korea University", "lat": "37.59014110", "lng": "127.03623180", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351677"}, {"id": "2836d68c86f29bb87537ea6066d508fde838ad71", "title": "Personalized Age Progression with Aging Dictionary", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/pdf/1510.06503v1.pdf"}, {"id": "3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96", "title": "Person Identity Label Propagation in Stereo Videos", "addresses": [{"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TMM.2014.2315595"}, {"id": "dac34b590adddef2fc31f26e2aeb0059115d07a1", "title": "House in the (Biometric) Cloud: A Possible Application", "addresses": [{"address": "Sapienza University of Rome", "lat": "41.90376260", "lng": "12.51443840", "type": "edu"}, {"address": "Sapienza Univertsity of Rome", "lat": "41.90376260", "lng": "12.51443840", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436078"}, {"id": "210b98394c3be96e7fd75d3eb11a391da1b3a6ca", "title": "Spatiotemporal Derivative Pattern: A Dynamic Texture Descriptor for Video Matching", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}, {"address": "Tafresh University", "lat": "34.68092465", "lng": "50.05341352", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/210b/98394c3be96e7fd75d3eb11a391da1b3a6ca.pdf"}, {"id": "125d82fee1b9fbcc616622b0977f3d06771fc152", "title": "Hierarchical face parsing via deep learning", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2012, "pdf": "http://www.ee.cuhk.edu.hk/~xgwang/papers/luoWTcvpr12.pdf"}, {"id": "ccfebdf7917cb50b5fcd56fb837f841a2246a149", "title": "A feature subtraction method for image based kinship verification under uncontrolled environments", "addresses": [{"address": "Aalborg University", "lat": "57.01590275", "lng": "9.97532827", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351065"}, {"id": "a538b05ebb01a40323997629e171c91aa28b8e2f", "title": "Rectified Linear Units Improve Restricted Boltzmann Machines", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/a538/b05ebb01a40323997629e171c91aa28b8e2f.pdf"}, {"id": "4cdb6144d56098b819076a8572a664a2c2d27f72", "title": "Face Synthesis for Eyeglass-Robust Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.01196.pdf"}, {"id": "472ba8dd4ec72b34e85e733bccebb115811fd726", "title": "Cosine Similarity Metric Learning for Face Verification", "addresses": [{"address": "University 
of Nottingham", "lat": "52.93874280", "lng": "-1.20029569", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf"}, {"id": "6880013eb0b91a2b334e0be0dced0a1a79943469", "title": "Discrimination-aware Channel Pruning for Deep Neural Networks", "addresses": [{"address": "South China University of Technology", "lat": "23.05020420", "lng": "113.39880323", "type": "edu"}, {"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11809.pdf"}, {"id": "0b242d5123f79defd5f775d49d8a7047ad3153bc", "title": "How Important Is Weight Symmetry in Backpropagation?", "addresses": [{"address": "McGovern Institute for Brain Research", "lat": "42.36262950", "lng": "-71.09144810", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/84db/c0010ae4f5206d689cf9f5bb176d18990bcd.pdf"}, {"id": "e293a31260cf20996d12d14b8f29a9d4d99c4642", "title": "LR-GAN: Layered Recursive Generative Adversarial Networks for Image Generation", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.01560.pdf"}, {"id": "6f9824c5cb5ac08760b08e374031cbdabc953bae", "title": "Unconstrained human identification using comparative facial soft biometrics", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/BTAS.2016.7791206"}, {"id": "21258aa3c48437a2831191b71cd069c05fb84cf7", "title": "A Robust and Efficient Doubly Regularized Metric Learning Approach", "addresses": [{"address": "University of Florida", "lat": "29.63287840", "lng": "-82.34901330", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2125/8aa3c48437a2831191b71cd069c05fb84cf7.pdf"}, {"id": "78d645d5b426247e9c8f359694080186681f57db", "title": "Gender Classification by LUT Based Boosting of Overlapping Block Patterns", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}, {"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/78d6/45d5b426247e9c8f359694080186681f57db.pdf"}, {"id": "2a35d20b2c0a045ea84723f328321c18be6f555c", "title": "Boost Picking: A Universal Method on Converting Supervised Classification to Semi-supervised Classification", "addresses": [{"address": "Beijing Institute of Technology", "lat": "39.95866520", "lng": "116.30971281", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d1be/cba3c460892453939f9f3639d8beddf2a133.pdf"}, {"id": "5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7", "title": "Rendering or normalization? 
An analysis of the 3D-aided pose-invariant face recognition", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ISBA.2016.7477244"}, {"id": "9be653e1bc15ef487d7f93aad02f3c9552f3ee4a", "title": "Computer Vision for Head Pose Estimation: Review of a Competition", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/9be6/53e1bc15ef487d7f93aad02f3c9552f3ee4a.pdf"}, {"id": "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "title": "The MegaFace Benchmark: 1 Million Faces for Recognition at Scale", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527"}, {"id": "3d0c21d4780489bd624a74b07e28c16175df6355", "title": "Deep or Shallow Facial Descriptors? A Case for Facial Attribute Classification and Face Retrieval", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3d0c/21d4780489bd624a74b07e28c16175df6355.pdf"}, {"id": "48906f609446afcdaacbe1d65770d7a6165a8eee", "title": "Storages Are Not Forever", "addresses": [{"address": "RWTH Aachen University", "lat": "50.77917030", "lng": "6.06728733", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/s12559-017-9482-4"}, {"id": "31f905d40a4ac3c16c91d5be8427762fa91277f1", "title": "Learning Rotation-Invariant Local Binary Descriptor", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2704661"}, {"id": "0f64e26d6dd6f1c99fe2050887fac26cafe9ed60", "title": "Bridging the Gap Between Forensics and Biometric-Enabled Watchlists for e-Borders", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/MCI.2016.2627668"}, {"id": "2969f822b118637af29d8a3a0811ede2751897b5", "title": "Cascaded Shape Space Pruning for Robust Facial Landmark Detection", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://iip.ict.ac.cn/sites/default/files/publication/2013_ICCV_xwzhao_Cascaded%20Shape%20Space%20Pruning%20for%20Robust%20Facial%20Landmark%20Detection.pdf"}, {"id": "4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1", "title": "A Deep Sum-Product Architecture for Robust Facial Attributes Analysis", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Luo_A_Deep_Sum-Product_2013_ICCV_paper.pdf"}, {"id": "134f1cee8408cca648d8b4ca44b38b0a7023af71", "title": "Partially Shared MultiTask Convolutional Neural Network with Local Constraint for Face Attribute Learning", "addresses": [{"address": "Zhejiang University", "lat": 
"30.19331415", "lng": "120.11930822", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/134f/1cee8408cca648d8b4ca44b38b0a7023af71.pdf"}, {"id": "d77f18917a58e7d4598d31af4e7be2762d858370", "title": "Detecting person presence in TV shows with linguistic and structural features", "addresses": [{"address": "Aix Marseille University, France", "lat": "43.29362100", "lng": "5.35806600", "type": "edu"}, {"address": "Orange Labs, Lannion, France", "lat": "48.75416800", "lng": "-3.45845860", "type": "company"}], "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6289062"}, {"id": "bd8f3fef958ebed5576792078f84c43999b1b207", "title": "BUAA-iCC at ImageCLEF 2015 Scalable Concept Image Annotation Challenge", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bd8f/3fef958ebed5576792078f84c43999b1b207.pdf"}, {"id": "c51fbd2574e488e486483e39702a3d7754cc769b", "title": "Face Recognition from Still Images to Video Sequences: A Local-Feature-Based Framework", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/c51f/bd2574e488e486483e39702a3d7754cc769b.pdf"}, {"id": "b13e2e43672e66ba45d1b852a34737e4ce04226b", "title": "Face Painting: querying art with photos", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3552/4e63c11f13fe08b2996a7bc0a9105e7c407b.pdf"}, {"id": "65b1760d9b1541241c6c0222cc4ee9df078b593a", "title": "Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf"}, {"id": "518a3ce2a290352afea22027b64bf3950bffc65a", "title": "Finding iconic images", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204174"}, {"id": "c87f7ee391d6000aef2eadb49f03fc237f4d1170", "title": "A real-time and unsupervised face Re-Identification system for Human-Robot Interaction", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1804.03547.pdf"}, {"id": "14b87359f6874ff9b8ee234b18b418e57e75b762", "title": "Face Alignment Using a Ranking Model based on Regression Trees", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}, {"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/1b62/6c14544f249cd52ef86a4efc17f3d3834003.pdf"}, {"id": "0e2d956790d3b8ab18cee8df6c949504ee78ad42", "title": "Scalable face image retrieval integrating multi-feature quantization and constrained reference re-ranking", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/IVCNZ.2013.6727024"}, {"id": "323f9ae6bdd2a4e4dce4168f7f7e19c70585c9b5", "title": "Empirically Analyzing the Effect of Dataset Biases 
on Deep Face Recognition Systems", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1712.01619.pdf"}, {"id": "1ce3a91214c94ed05f15343490981ec7cc810016", "title": "Exploring photobios", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2011, "pdf": "http://grail.cs.washington.edu/photobios/paper.pdf"}, {"id": "69eb6c91788e7c359ddd3500d01fb73433ce2e65", "title": "CAMGRAPH: Distributed Graph Processing for Camera Networks", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/69eb/6c91788e7c359ddd3500d01fb73433ce2e65.pdf"}, {"id": "2296d79753118cfcd0fecefece301557f4cb66e2", "title": "Exploring Disentangled Feature Representation Beyond Face Identification", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "SenseTime", "lat": "39.99300800", "lng": "116.32988200", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03487.pdf"}, {"id": "53ce84598052308b86ba79d873082853022aa7e9", "title": "Optimized Method for Real-Time Face Recognition System Based on PCA and Multiclass Support Vector Machine", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf"}, {"id": "a29a22878e1881d6cbf6acff2d0b209c8d3f778b", "title": "Benchmarking Still-to-Video Face Recognition via Partial and Local Linear Discriminant Analysis on COX-S2V Dataset", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a29a/22878e1881d6cbf6acff2d0b209c8d3f778b.pdf"}, {"id": "052f994898c79529955917f3dfc5181586282cf8", "title": "Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02191.pdf"}, {"id": "a0061dae94d916f60a5a5373088f665a1b54f673", "title": "Lensless computational imaging through deep learning", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a006/1dae94d916f60a5a5373088f665a1b54f673.pdf"}, {"id": "809ea255d144cff780300440d0f22c96e98abd53", "title": "ArcFace: Additive Angular Margin Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf"}, {"id": "439ca6ded75dffa5ddea203dde5e621dc4a88c3e", "title": "Robust real-time performance-driven 3D face tracking", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899906"}, {"id": 
"9c59bb28054eee783a40b467c82f38021c19ff3e", "title": "Logistic similarity metric learning for face verification", "addresses": [{"address": "University of Lyon", "lat": "45.78332440", "lng": "4.87819840", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7178311"}, {"id": "6043006467fb3fd1e9783928d8040ee1f1db1f3a", "title": "Face recognition with learning-based descriptor", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539992"}, {"id": "9729930ab0f9cbcd07f1105bc69c540330cda50a", "title": "Compressing Fisher Vector for Robust Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2749331"}, {"id": "3b9b200e76a35178da940279d566bbb7dfebb787", "title": "Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf"}, {"id": "feea73095b1be0cbae1ad7af8ba2c4fb6f316d35", "title": "Deep Face Recognition with Center Invariant Loss", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3126693"}, {"id": "54948ee407b5d32da4b2eee377cc44f20c3a7e0c", "title": "Right for the Right Reason: Training Agnostic Networks", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06296.pdf"}, {"id": "77869f274d4be4d4b4c438dbe7dff4baed521bd8", "title": "Face Recognition With Pose Variations and Misalignment via Orthogonal Procrustes Regression", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2551362"}, {"id": "0faee699eccb2da6cf4307ded67ba8434368257b", "title": "TAIGMAN: MULTIPLE ONE-SHOTS FOR UTILIZING CLASS LABEL INFORMATION 1 Multiple One-Shots for Utilizing Class Label Information", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}, {"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2396/5bd9b557b04b2c81a35ee5c16951c0e420f3.pdf"}, {"id": "1eb9c859ff7537182a25556635954bcd11830822", "title": "Multi-features fusion based CRFs for face segmentation", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Shanghai University", "lat": "31.32235655", "lng": "121.38400941", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICDSP.2015.7252004"}, {"id": "3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3", "title": "Distance Metric Learning with Eigenvalue Optimization", "addresses": [{"address": "University of Bristol", "lat": 
"51.45848370", "lng": "-2.60977520", "type": "edu"}, {"address": "University of Exeter", "lat": "50.73693020", "lng": "-3.53647672", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/51f7/3cfcc6d671bd99b5c3c512ff9b7bb959f33b.pdf"}, {"id": "f5aee1529b98136194ef80961ba1a6de646645fe", "title": "Large-scale learning of discriminative image representations", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f5ae/e1529b98136194ef80961ba1a6de646645fe.pdf"}, {"id": "abdd17e411a7bfe043f280abd4e560a04ab6e992", "title": "Pose-Robust Face Recognition via Deep Residual Equivariant Mapping", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00839.pdf"}, {"id": "d40c16285d762f7a1c862b8ac05a0fdb24af1202", "title": "Coarse-to-fine facial landmarks localization based on convolutional feature", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BESC.2017.8256378"}, {"id": "780c8a795baca1ba4cb4956cded877dd3d1ca313", "title": "Simulation of face recognition at a distance by scaling down images", "addresses": [{"address": "University of Louisville", "lat": "38.21675650", "lng": "-85.75725023", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISSPIT.2013.6781879"}, {"id": "2f2aa67c5d6dbfaf218c104184a8c807e8b29286", "title": "Video analytics for surveillance camera networks", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2013, "pdf": "http://sesame.comp.nus.edu.sg/components/com_flexicontent/uploads/lekhaicon13.pdf"}, {"id": "982d4f1dee188f662a4b5616a045d69fc5c21b54", "title": "Learning to link human objects in videos and advertisements with clothes retrieval", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IJCNN.2016.7727859"}, {"id": "81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5", "title": "Histogram equalized deep PCA with ELM classification for expressive face recognition", "addresses": [{"address": "Khon Kaen University", "lat": "16.46007565", "lng": "102.81211798", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369725"}, {"id": "86fa086d02f424705bbea53943390f009191740a", "title": "Precise eye localization with improved SDM", "addresses": [{"address": "Samsung SAIT, Korea", "lat": "37.25202260", "lng": "127.05550190", "type": "company"}, {"address": "Samsung SAIT, Beijing", "lat": "39.90419990", "lng": "116.40739630", "type": "company"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351651"}, {"id": "a35ed55dc330d470be2f610f4822f5152fcac4e1", "title": "Tattoo recognition technology - challenge (Tatt-C): an open tattoo database for developing tattoo recognition research", "addresses": [{"address": "NIST", "lat": "39.14004000", "lng": "-77.21850600", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ISBA.2015.7126369"}, {"id": "3ba74755c530347f14ec8261996dd9eae896e383", "title": "A Low-Power Convolutional Neural Network Face Recognition 
Processor and a CIS Integrated With Always-on Face Detector", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/JSSC.2017.2767705"}, {"id": "7fc3442c8b4c96300ad3e860ee0310edb086de94", "title": "Similarity Scores Based on Background Samples", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}, {"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/82f3/b7cacc15e026fd3a7639091d54162f6ae064.pdf"}, {"id": "42f6f5454dda99d8989f9814989efd50fe807ee8", "title": "Conditional generative adversarial nets for convolutional face generation", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/42f6/f5454dda99d8989f9814989efd50fe807ee8.pdf"}, {"id": "0aebe97a92f590bdf21cdadfddec8061c682cdb2", "title": "Probabilistic Elastic Part Model: A Pose-Invariant Representation for Real-World Face Verification", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183"}, {"id": "7ffef9f26c39377ee937d29b8990580266a7a8a5", "title": "Deep Metric Learning with Hierarchical Triplet Loss", "addresses": [{"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06951.pdf"}, {"id": "7862f646d640cbf9f88e5ba94a7d642e2a552ec9", "title": "Being John Malkovich", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/7862/f646d640cbf9f88e5ba94a7d642e2a552ec9.pdf"}, {"id": "754626bd5fb06fee5e10962fdfeddd495513e84b", "title": "Facial expression pair matching", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SIU.2017.7960646"}, {"id": "e3c8e49ffa7beceffca3f7f276c27ae6d29b35db", "title": "Families in the Wild (FIW): Large-Scale Kinship Image Database and Benchmarks", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1604.02182.pdf"}, {"id": "b306bd9b485c6a6c1e4550beb1910ed9b6585359", "title": "Learning generative models of mid-level structure in natural images", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/b306/bd9b485c6a6c1e4550beb1910ed9b6585359.pdf"}, {"id": "aed6af12148b43e4a24ee6e2bc3604ca59bd99a5", "title": "Discriminative Deep Metric Learning for Face and Kinship Verification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2717505"}, {"id": "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "title": "AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces", "addresses": 
[{"address": "York University", "lat": "43.77439110", "lng": "-79.50481085", "type": "edu"}, {"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04277.pdf"}, {"id": "467b602a67cfd7c347fe7ce74c02b38c4bb1f332", "title": "Large Margin Local Metric Learning", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/467b/602a67cfd7c347fe7ce74c02b38c4bb1f332.pdf"}, {"id": "9887ab220254859ffc7354d5189083a87c9bca6e", "title": "Generic Image Classification Approaches Excel on Face Recognition", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf"}, {"id": "0034e37a0faf0f71395245b266aacbf5412f190a", "title": "Face Distortion Recovery Based on Online Learning Database for Conversational Video", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TMM.2014.2355134"}, {"id": "fe0cf8eaa5a5f59225197ef1bb8613e603cd96d4", "title": "Improved Face Verification with Simple Weighted Feature Combination", "addresses": [{"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf"}, {"id": "e20e2db743e8db1ff61279f4fda32bf8cf381f8e", "title": "Deep Cross Polarimetric Thermal-to-Visible Face Recognition", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.01486.pdf"}, {"id": "9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534", "title": "Exponential Discriminant Locality Preserving Projection for face recognition", "addresses": [{"address": "Jiangsu University of Science and Technology", "lat": "32.19805500", "lng": "119.46326791", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2016.02.063"}, {"id": "7783095a565094ae5b3dccf082d504ddd7255a5c", "title": "\"Wow! 
you are so beautiful today!\"", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2013", "pdf": "http://dl.acm.org/citation.cfm?id=2502258"}, {"id": "fff31548617f208cd5ae5c32917afd48abc4ff6a", "title": "Mobile situated analytics of ego-centric network data", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3139295.3139309"}, {"id": "0de1450369cb57e77ef61cd334c3192226e2b4c2", "title": "In defense of low-level structural features and SVMs for facial attribute classification: Application to detection of eye state, Mouth State, and eyeglasses in the wild", "addresses": [{"address": "Virginia Polytechnic Institute and State University", "lat": "37.21872455", "lng": "-80.42542519", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272747"}, {"id": "15cf7bdc36ec901596c56d04c934596cf7b43115", "title": "Face Extraction from Image based on K-Means Clustering Algorithms", "addresses": [{"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/15cf/7bdc36ec901596c56d04c934596cf7b43115.pdf"}, {"id": "3a49507c46a2b8c6411809c81ac47b2b1d2282c3", "title": "Exploring joint encoding of multi-direction local binary patterns for image classification", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-5319-0"}, {"id": "b81cae2927598253da37954fb36a2549c5405cdb", "title": "Experiments on Visual Information Extraction with the Faces of Wikipedia", "addresses": [{"address": "Polytechnique Montreal", "lat": "45.50438400", "lng": "-73.61288290", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/d892/753827950a227179b691e6df85820ab7c417.pdf"}, {"id": "58d76380d194248b3bb291b8c7c5137a0a376897", "title": "FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf"}, {"id": "5dd57b7e0e82a33420c054da7ea3f435d49e910e", "title": "Matching and Perturbation Theories for Affine-Invariant Shapes Using QR Factorization with Column Pivoting", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1007/s10851-014-0493-4"}, {"id": "110919f803740912e02bb7e1424373d325f558a9", "title": "Statistical Inference of Gaussian-Laplace Distribution for Person Verification", "addresses": [{"address": "China University of Geosciences", "lat": "30.52715100", "lng": "114.40076200", "type": "edu"}, {"address": "National Institute of Informatics, Japan", "lat": "35.69248530", "lng": "139.75825330", "type": "edu"}, {"address": "Wuhan University of Technology", "lat": "30.60903415", "lng": "114.35142840", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3123266.3123421"}, {"id": 
"537328af75f50d49696972a6c34bca97c14bc762", "title": "Exploiting Unintended Feature Leakage in Collaborative Learning", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04049.pdf"}, {"id": "9825c4dddeb2ed7eaab668b55403aa2c38bc3320", "title": "Aerial Imagery for Roof Segmentation: A Large-Scale Dataset towards Automatic Mapping of Buildings", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}, {"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09532.pdf"}, {"id": "7a65fc9e78eff3ab6062707deaadde024d2fad40", "title": "A Study on Apparent Age Estimation", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf"}, {"id": "0750c796467b6ef60b0caff5fb199337d54d431e", "title": "Face detection method based on histogram of sparse code in tree deformable model", "addresses": [{"address": "Chongqing University of Posts and Telecommunications", "lat": "29.53570460", "lng": "106.60482474", "type": "edu"}, {"address": "University of North Carolina Wilmington", "lat": "34.23755810", "lng": "-77.92701290", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICMLC.2016.7873015"}, {"id": "e66a6ae542907d6a0ebc45da60a62d3eecf17839", "title": "3D-aided face recognition from videos", "addresses": [{"address": "Morpho, SAFRAN Group, France", "lat": "48.82250670", "lng": "2.26875410", "type": "company"}, {"address": "University of Lyon", "lat": "45.78332440", "lng": "4.87819840", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/EUVIP.2014.7018366"}, {"id": "ae425a2654a1064c2eda29b08a492c8d5aab27a2", "title": "An incremental face recognition system based on deep learning", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.23919/MVA.2017.7986845"}, {"id": "39c10888a470b92b917788c57a6fd154c97b421c", "title": "Joint multi-feature fusion and attribute relationships for facial attribute prediction", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/VCIP.2017.8305036"}, {"id": "d04d5692461d208dd5f079b98082eda887b62323", "title": "Subspace learning with frequency regularizer: Its application to face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/ZLEI-ICB-15.pdf"}, {"id": "e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a", "title": "Topology preserving graph matching for partial face recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019548"}, {"id": "84574aa43a98ad8a29470977e7b091f5a5ec2366", "title": "Latent max-margin metric learning for comparing video face tubes", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": 
"edu"}, {"address": "Technicolor, France", "lat": "48.83153300", "lng": "2.28066283", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301321"}, {"id": "9103148dd87e6ff9fba28509f3b265e1873166c9", "title": "Face Analysis using 3D Morphable Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf"}, {"id": "32c20afb5c91ed7cdbafb76408c3a62b38dd9160", "title": "Viewing Real-World Faces in 3D", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Hassner_Viewing_Real-World_Faces_2013_ICCV_paper.pdf"}, {"id": "f5eb0cf9c57716618fab8e24e841f9536057a28a", "title": "Rethinking Feature Distribution for Loss Functions in Image Classification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.02988.pdf"}, {"id": "b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0", "title": "Homemade TS-Net for Automatic Face Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2911996.2911999"}, {"id": "24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9", "title": "An analysis of the robustness of deep face recognition networks to noisy training labels", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/GlobalSIP.2016.7906030"}, {"id": "4b9b30066a05bdeb0e05025402668499ebf99a6b", "title": "Real-time face detection using Gentle AdaBoost algorithm and nesting cascade structure", "addresses": [{"address": "Huaqiao University", "lat": "24.60047120", "lng": "118.08165740", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/ISPACS.2012.6473448"}, {"id": "23e824d1dfc33f3780dd18076284f07bd99f1c43", "title": "Spoofing faces using makeup: An investigative study", "addresses": [{"address": "INRIA M\u00e9diterran\u00e9e", "lat": "43.61581310", "lng": "7.06838000", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686"}, {"id": "612075999e82596f3b42a80e6996712cc52880a3", "title": "CNNs with cross-correlation matching for face recognition in video surveillance using a single training sample per person", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078554"}, {"id": "6bfb0f8dd1a2c0b44347f09006dc991b8a08559c", "title": "Multiview discriminative learning for age-invariant face recognition", "addresses": [{"address": "Lomonosov Moscow State University", "lat": "55.70229715", "lng": "37.53179777", "type": "edu"}, {"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", 
"type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2013, "pdf": "https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf"}, {"id": "c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1", "title": "Ultra-Resolving Face Images by Discriminative Generative Networks", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d6f1/42f5ddcb027e7b346eb20703abbf5cc4e883.pdf"}, {"id": "d8288322f32ee4501cef5a9b667e5bb79ebd7018", "title": "Facing scalability: Naming faces in an online social network", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1016/j.patcog.2011.12.018"}, {"id": "de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0", "title": "Merge or Not? Learning to Group Faces via Imitation Learning", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1707.03986.pdf"}, {"id": "eb87151fd2796ff5b4bbcf1906d41d53ac6c5595", "title": "Enhanced face detection using body part detections for wearable cameras", "addresses": [{"address": "IBM Thomas J. Watson Research Center", "lat": "41.21002475", "lng": "-73.80407056", "type": "company"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899719"}, {"id": "edb5813a32ce1167feb263ca2803d0ae934d902c", "title": "Invisible Steganography via Generative Adversarial Networks", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08571.pdf"}, {"id": "7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d", "title": "Stereo Matching for Unconstrained Face Recognition Ph . D . 
Proposal", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/7ca7/255c2e0c86e4adddbbff2ce74f36b1dc522d.pdf"}, {"id": "53bfe2ab770e74d064303f3bd2867e5bf7b86379", "title": "Learning to Synthesize and Manipulate Natural Images", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/d989/c3064d49bf8e63587ada4ed2bdb0d32b120a.pdf"}, {"id": "1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d", "title": "Online robust image alignment via iterative convex optimization", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Temple University", "lat": "39.95472495", "lng": "-75.15346905", "type": "edu"}], "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247878"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "27dafedccd7b049e87efed72cabaa32ec00fdd45", "title": "Unsupervised visual alignment with similarity graphs", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_074.pdf"}, {"id": "e9c008d31da38d9eef67a28d2c77cb7daec941fb", "title": "Noisy Softmax: Improving the Generalization Ability of DCNN via Postponing the Early Softmax Saturation", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.03769.pdf"}, {"id": "39f525f3a0475e6bbfbe781ae3a74aca5b401125", "title": "Deep Joint Face Hallucination and Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/39f5/25f3a0475e6bbfbe781ae3a74aca5b401125.pdf"}, {"id": "88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5", "title": "Runtime Neural Pruning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/88cd/4209db62a34d9cba0b9cbe9d45d1e57d21e5.pdf"}, {"id": "3b75681f0162752865d85befd8b15e7d954ebfe6", "title": "Evaluation of a face recognition system performance's variation on a citizen passports database", "addresses": [{"address": "Universidad de la Rep\u00fablica, Uruguay", "lat": "-34.91817060", "lng": "-56.16657250", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/CLEI.2014.6965097"}, {"id": "ed9d11e995baeec17c5d2847ec1a8d5449254525", "title": "Efficient Gender Classification Using a Deep LDA-Pruned Net", "addresses": [{"address": "McGill University", "lat": "45.50397610", "lng": "-73.57496870", "type": "edu"}], "year": "2017", "pdf": 
"https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf"}, {"id": "21d1315761131ea6b3e2afe7a745b606341616fd", "title": "Generative Adversarial Network with Spatial Attention for Face Attribute Editing", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/21d1/315761131ea6b3e2afe7a745b606341616fd.pdf"}, {"id": "13a994d489c15d440c1238fc1ac37dad06dd928c", "title": "Learning Discriminant Face Descriptor for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/13a9/94d489c15d440c1238fc1ac37dad06dd928c.pdf"}, {"id": "96f0e7416994035c91f4e0dfa40fd45090debfc5", "title": "Unsupervised Learning of Face Representations", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.01260.pdf"}, {"id": "0141cb33c822e87e93b0c1bad0a09db49b3ad470", "title": "Unconstrained 3D face reconstruction", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876"}, {"id": "d2f2b10a8f29165d815e652f8d44955a12d057e6", "title": "Multiscale binarised statistical image features for symmetric face matching using multiple descriptor fusion based on class-specific LDA", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1007/s10044-015-0475-1"}, {"id": "582edc19f2b1ab2ac6883426f147196c8306685a", "title": "Do We Really Need to Collect Millions of Faces for Effective Face Recognition?", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf"}, {"id": "2d3c17ced03e4b6c4b014490fe3d40c62d02e914", "title": "Video-driven state-aware facial animation", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2d3c/17ced03e4b6c4b014490fe3d40c62d02e914.pdf"}, {"id": "cd55fb30737625e86454a2861302b96833ed549d", "title": "Annotating Unconstrained Face Imagery: A scalable approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "Noblis, Falls Church, VA, U.S.A.", "lat": "38.95187000", "lng": "-77.36325900", "type": "company"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094"}, {"id": "78f08685d44b6c6f82983d9b0f9c6ac2f7203a5e", "title": "An Adaptive Ensemble Approach to Ambient Intelligence Assisted People Search", "addresses": [{"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/78f0/8685d44b6c6f82983d9b0f9c6ac2f7203a5e.pdf"}, {"id": "c4d0d09115a0df856cdb389fbccb20f62b07b14e", "title": "Environment coupled metrics learning for unconstrained face verification", "addresses": [{"address": "Chinese Academy of 
Science", "lat": "39.90419990", "lng": "116.40739630", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1109/ICIP.2012.6466925"}, {"id": "30180f66d5b4b7c0367e4b43e2b55367b72d6d2a", "title": "Template Adaptation for Face Verification and Identification", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2017, "pdf": "http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf"}, {"id": "955e2a39f51c0b6f967199942d77625009e580f9", "title": "Naming Faces on the Web", "addresses": [{"address": "Bilkent University", "lat": "39.87204890", "lng": "32.75395155", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/955e/2a39f51c0b6f967199942d77625009e580f9.pdf"}, {"id": "fdd19fee07f2404952e629cc7f7ffaac14febe01", "title": "Face recognition based on dictionary learning with the locality constraints of atoms", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Guangdong Polytechnic Normal University", "lat": "23.13170700", "lng": "113.37164300", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/CISP-BMEI.2016.7852754"}, {"id": "e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa", "title": "Weakly Supervised Learning for Unconstrained Face Processing", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf"}, {"id": "051f03bc25ec633592aa2ff5db1d416b705eac6c", "title": "Partial face recognition: An alignment free approach", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf"}, {"id": "f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b", "title": "Understanding Blooming Human Groups in Social Networks", "addresses": [{"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}, {"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TMM.2015.2476657"}, {"id": "486840f4f524e97f692a7f6b42cd19019ee71533", "title": "DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills", "addresses": [{"address": "\u00c9cole Centrale de Lyon", "lat": "45.78359660", "lng": "4.76789480", "type": "edu"}, {"address": "Safran Identity and Security", "lat": "48.83249300", "lng": "2.26747400", "type": "company"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1703.08388v2.pdf"}, {"id": "a961f1234e963a7945fed70197015678149b37d8", "title": "Facial Expression Synthesis by U-Net Conditional Generative Adversarial Networks", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3206068"}, {"id": "660c99ac408b535bb0468ab3708d0d1d5db30180", "title": "An improved redundant dictionary based on sparse representation for face recognition", "addresses": [{"address": "China University of Mining and Technology", "lat": "34.21525380", "lng": "117.13985410", "type": "edu"}], "year": "2015", "pdf": 
"http://doi.org/10.1007/s11042-015-3083-6"}, {"id": "c444c4dab97dd6d6696f56c1cacda051dde60448", "title": "Multiview Face Detection and Registration Requiring Minimal Manual Intervention", "addresses": [{"address": "A*STAR, Singapore", "lat": "1.29889260", "lng": "103.78731070", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37"}, {"id": "1aeef2ab062c27e0dbba481047e818d4c471ca57", "title": "Analyzing impact of image scaling algorithms on viola-jones face detection framework", "addresses": [{"address": "Central Electronics Research Institute, Pilani, India", "lat": "28.36561930", "lng": "75.58349530", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICACCI.2015.7275860"}, {"id": "d6ae7941dcec920d5726d50d1b1cdfe4dde34d35", "title": "Avatar digitization from a single image for real-time rendering", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=31310887"}, {"id": "fba95853ca3135cc52a4b2bc67089041c2a9408c", "title": "Disguised Faces in the Wild", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fba9/5853ca3135cc52a4b2bc67089041c2a9408c.pdf"}, {"id": "4512b87d68458d9ba0956c0f74b60371b6c69df4", "title": "SuperPatchMatch: An Algorithm for Robust Correspondences Using Superpixel Patches", "addresses": [{"address": "University of Bordeaux", "lat": "44.80837500", "lng": "-0.59670500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2708504"}, {"id": "38a9ca2c49a77b540be52377784b9f734e0417e4", "title": "Face verification using large feature sets and one shot similarity", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Campinas", "lat": "-27.59539950", "lng": "-48.61542180", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2011, "pdf": "http://homepages.dcc.ufmg.br/~william/papers/paper_2011_IJCB_Faces.pdf"}, {"id": "ac2881bdf7b57dc1672a17b221d68a438d79fce8", "title": "Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08472.pdf"}, {"id": "69adbfa7b0b886caac15ebe53b89adce390598a3", "title": "Face hallucination using cascaded super-resolution and identity priors", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10938.pdf"}, {"id": "8686b15802529ff8aea50995ef14079681788110", "title": "Deformed Graph Laplacian for Semisupervised Learning", "addresses": [{"address": "University of Technology", "lat": "-33.88405040", "lng": "151.19922540", "type": "edu"}, {"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", 
"type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TNNLS.2014.2376936"}, {"id": "9f131b4e036208f2402182a1af2a59e3c5d7dd44", "title": "Face Retrieval Framework Relying on User's Visual Memory", "addresses": [{"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}, {"address": "Waseda University", "lat": "33.88987280", "lng": "130.70856205", "type": "edu"}], "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3206038"}, {"id": "09ce14b84af2dc2f76ae1cf227356fa0ba337d07", "title": "Face reconstruction in the wild", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2011, "pdf": "http://grail.cs.washington.edu/3dfaces/paper.pdf"}, {"id": "d79365336115661b0e8dbbcd4b2aa1f504b91af6", "title": "Variational methods for conditional multimodal deep learning", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1603.01801.pdf"}, {"id": "7df4f96138a4e23492ea96cf921794fc5287ba72", "title": "A Jointly Learned Deep Architecture for Facial Attribute Analysis and Face Detection in the Wild", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.08705.pdf"}, {"id": "6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "title": "Deep Learning Face Attributes in the Wild", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/pdf/1411.7766v2.pdf"}, {"id": "dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935", "title": "Age classification with deep learning face representation", "addresses": [{"address": "South China Normal University", "lat": "23.14319700", "lng": "113.34009651", "type": "edu"}, {"address": "South China University of Technology", "lat": "23.05020420", "lng": "113.39880323", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4646-5"}, {"id": "bbcb4920b312da201bf4d2359383fb4ee3b17ed9", "title": "Robust Face Recognition via Multi-Scale Patch-Based Matrix Regression", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/bbcb/4920b312da201bf4d2359383fb4ee3b17ed9.pdf"}, {"id": "feb6e267923868bff6e2108603d00fdfd65251ca", "title": "Unsupervised Discovery of Visual Face Categories", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "University of Nevada", "lat": "39.54694490", "lng": "-119.81346566", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/feb6/e267923868bff6e2108603d00fdfd65251ca.pdf"}, {"id": "8f6263e4d3775757e804796e104631c7a2bb8679", "title": "Characterizing Visual Representations within Convolutional Neural Networks: Toward a Quantitative Approach", "addresses": [{"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/8f62/63e4d3775757e804796e104631c7a2bb8679.pdf"}, {"id": "2ab034e1f54c37bfc8ae93f7320160748310dc73", "title": "Siamese Capsule Networks", "addresses": [{"address": "University of Liverpool", "lat": "53.40617900", "lng": 
"-2.96670819", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07242.pdf"}, {"id": "1d1a7ef193b958f9074f4f236060a5f5e7642fc1", "title": "Ensemble of Patterns of Oriented Edge Magnitudes Descriptors For Face Recognition", "addresses": [{"address": "University of Bologna", "lat": "44.49623180", "lng": "11.35415700", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/db40/804914afbb7f8279ca9a4f52e0ade695f19e.pdf"}, {"id": "36486944b4feeb88c0499fecd253c5a53034a23f", "title": "Deep feature selection and projection for cross-age face retrieval", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/CISP-BMEI.2017.8301986"}, {"id": "e7436b8e68bb7139b823a7572af3decd96241e78", "title": "A new approach for face detection with omnidirectional sensors", "addresses": [{"address": "IRSEEM Rouen, France", "lat": "49.44323200", "lng": "1.09997100", "type": "edu"}, {"address": "University of Rouen, France", "lat": "49.38497570", "lng": "1.06832570", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/ROBIO.2011.6181560"}, {"id": "96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9", "title": "State-of-the-art face recognition performance using publicly available software and datasets", "addresses": [{"address": "Universit\u00e9 Paris-Saclay, France", "lat": "48.84760370", "lng": "2.26399340", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450"}, {"id": "e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7", "title": "Toward End-to-End Face Recognition Through Alignment Learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1701.07174.pdf"}, {"id": "405526dfc79de98f5bf3c97bf4aa9a287700f15d", "title": "MegaFace: A Million Faces for Recognition at Scale", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf"}, {"id": "053b263b4a4ccc6f9097ad28ebf39c2957254dfb", "title": "Cost-Effective HITs for Relative Similarity Comparisons", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7a49/4b4489408ec3adea15817978ecd2e733f5fe.pdf"}, {"id": "82e66c4832386cafcec16b92ac88088ffd1a1bc9", "title": "OpenFace: A general-purpose face recognition library with mobile applications", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Poznan University of Technology", "lat": "52.40048370", "lng": "16.95158083", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf"}, {"id": "8bbbdff11e88327816cad3c565f4ab1bb3ee20db", "title": "Automatic Semantic Face Recognition", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.31"}, {"id": "2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d", "title": "Genealogical face recognition based on UB KinFace database", "addresses": [{"address": "SUNY 
Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/CVPRW.2011.5981801"}, {"id": "5f57a1a3a1e5364792b35e8f5f259f92ad561c1f", "title": "Implicit Sparse Code Hashing", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5f57/a1a3a1e5364792b35e8f5f259f92ad561c1f.pdf"}, {"id": "1677d29a108a1c0f27a6a630e74856e7bddcb70d", "title": "Efficient Misalignment-Robust Representation for Real-Time Face Recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/1677/d29a108a1c0f27a6a630e74856e7bddcb70d.pdf"}, {"id": "5f4219118556d2c627137827a617cf4e26242a6e", "title": "Explicit Shape Regression With Characteristic Number for Facial Landmark Localization", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TMM.2017.2751143"}, {"id": "0c1d85a197a1f5b7376652a485523e616a406273", "title": "Joint Registration and Representation Learning for Unconstrained Face Identification", "addresses": [{"address": "Khalifa University", "lat": "24.44690250", "lng": "54.39425630", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169"}, {"id": "84e6669b47670f9f4f49c0085311dce0e178b685", "title": "Face frontalization for Alignment and Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/84e6/669b47670f9f4f49c0085311dce0e178b685.pdf"}, {"id": "5213549200bccec57232fc3ff788ddf1043af7b3", "title": "Displaced dynamic expression regression for real-time facial tracking and animation", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2601097.2601204"}, {"id": "5039834df68600a24e7e8eefb6ba44a5124e67fc", "title": "Modular hierarchical feature learning with deep neural networks for face verification", "addresses": [{"address": "Beijing Institute of Science and Technology Information", "lat": "40.04332040", "lng": "116.34181090", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/ICIP.2013.6738761"}, {"id": "44f48a4b1ef94a9104d063e53bf88a69ff0f55f3", "title": "Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf"}, {"id": "6584c3c877400e1689a11ef70133daa86a238602", "title": "Supervised Committee of Convolutional Neural Networks in Automated Facial Expression Analysis", "addresses": [{"address": "Universitat Oberta de Catalunya", "lat": "41.40657415", "lng": "2.19453410", "type": "edu"}], "year": "2018", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8039231"}, {"id": "5763b09ebca9a756b4adebf74d6d7de27e80e298", "title": "Picture-specific cohort score normalization for face pair matching", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/BTAS.2013.6712738"}, {"id": "13bda03fc8984d5943ed8d02e49a779d27c84114", "title": "Efficient object detection using cascades of nearest convex model classifiers", "addresses": [{"address": "Eskisehir Osmangazi University", "lat": "39.74875160", "lng": "30.47653071", "type": "edu"}], "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248047"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "e66b4aa85524f493dafde8c75176ac0afad5b79c", "title": "Watchlist risk assessment using multiparametric cost and relative entropy", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SSCI.2017.8285219"}, {"id": "8af411697e73f6cfe691fe502d4bfb42510b4835", "title": "Dynamic Local Ternary Pattern for Face Recognition and Verification", "addresses": [{"address": "Hankuk University of Foreign Studies", "lat": "37.59539790", "lng": "127.06304990", "type": "edu"}, {"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}, {"address": "University of Dhaka", "lat": "23.73169570", "lng": "90.39652750", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/8af4/11697e73f6cfe691fe502d4bfb42510b4835.pdf"}, {"id": "1d3dd9aba79a53390317ec1e0b7cd742cba43132", "title": "A maximum entropy feature descriptor for age invariant face recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf"}, {"id": "539f55c0e2501c1d86791c8b54b225d9b3187b9c", "title": "Low-Rank Latent Pattern Approximation With Applications to Robust Image Classification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2738560"}, {"id": "82ccd62f70e669ec770daf11d9611cab0a13047e", "title": "Sparse Variation Pattern for Texture Classification", "addresses": [{"address": "Azad University", "lat": "36.31734320", "lng": "50.03672860", "type": "edu"}, {"address": "Tafresh University", "lat": "34.68092465", "lng": "50.05341352", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": 2013, "pdf": "http://www.csse.uwa.edu.au/~ajmal/papers/Farshid_DICTA2013.pdf"}, {"id": "1b5875dbebc76fec87e72cee7a5263d325a77376", "title": "Learnt Quasi-Transitive Similarity for Retrieval 
from Large Collections of Faces", "addresses": [{"address": "University of St Andrews", "lat": "56.34119840", "lng": "-2.79309380", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.528"}, {"id": "5632ba72b2652df3b648b2ee698233e76a4eee65", "title": "Reconstruction of 3D facial image using a single 2D image", "addresses": [{"address": "Xiamen University", "lat": "24.43994190", "lng": "118.09301781", "type": "edu"}, {"address": "University of Newcastle", "lat": "-33.35788990", "lng": "151.37834708", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8346387"}, {"id": "7c42371bae54050dbbf7ded1e7a9b4109a23a482", "title": "Optimized features selection using hybrid PSO-GA for multi-view gender classification", "addresses": [{"address": "Foundation University Rawalpindi Campus", "lat": "33.56095040", "lng": "73.07125966", "type": "edu"}, {"address": "University of Central Punjab", "lat": "31.44661490", "lng": "74.26797620", "type": "edu"}, {"address": "University of Dammam", "lat": "26.39793625", "lng": "50.19807924", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7c42/371bae54050dbbf7ded1e7a9b4109a23a482.pdf"}, {"id": "d23ec100432d860b12308941f8539af82a28843f", "title": "Adversarial Semantic Scene Completion from a Single Depth Image", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.10901.pdf"}, {"id": "43fe03ec1acb6ea9d05d2b22eeddb2631bd30437", "title": "Weakly supervised multiscale-inception learning for web-scale face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296394"}, {"id": "93971a49ef6cc88a139420349a1dfd85fb5d3f5c", "title": "Scalable Probabilistic Models: Applied to Face Identification in the Wild", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/9397/1a49ef6cc88a139420349a1dfd85fb5d3f5c.pdf"}, {"id": "bf8a520533f401347e2f55da17383a3e567ef6d8", "title": "Bounded-Distortion Metric Learning", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bf8a/520533f401347e2f55da17383a3e567ef6d8.pdf"}, {"id": "e9e40e588f8e6510fa5537e0c9e083ceed5d07ad", "title": "Fast Face Detection Using Graphics Processor", "addresses": [{"address": "National Institute of Technology, Karnataka", "lat": "13.01119095", "lng": "74.79498825", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/e9e4/0e588f8e6510fa5537e0c9e083ceed5d07ad.pdf"}, {"id": "192235f5a9e4c9d6a28ec0d333e36f294b32f764", "title": "Reconfiguring the Imaging Pipeline for Computer Vision", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf"}, {"id": 
"cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "e4e3faa47bb567491eaeaebb2213bf0e1db989e1", "title": "Empirical Risk Minimization for Metric Learning Using Privileged Information", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}, {"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e4e3/faa47bb567491eaeaebb2213bf0e1db989e1.pdf"}, {"id": "4b02387c2db968a70b69d98da3c443f139099e91", "title": "Detecting facial landmarks in the video based on a hybrid framework", "addresses": [{"address": "Guangdong University of Technology", "lat": "23.13538360", "lng": "113.29470496", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4b02/387c2db968a70b69d98da3c443f139099e91.pdf"}, {"id": "f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3", "title": "Large Margin Multi-metric Learning for Face and Kinship Verification in the Wild", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f7c5/0d2be9fba0e4527fd9fbe3095e9d9a94fdd3.pdf"}, {"id": "217de4ff802d4904d3f90d2e24a29371307942fe", "title": "POOF: Part-Based One-vs.-One Features for Fine-Grained Categorization, Face Verification, and Attribute Estimation", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.128"}, {"id": "e3b324101157daede3b4d16bdc9c2388e849c7d4", "title": "Robust Real-Time 3 D Face Tracking from RGBD Videos under Extreme Pose , Depth , and Expression Variations", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/e3b3/24101157daede3b4d16bdc9c2388e849c7d4.pdf"}, {"id": "8ee5b1c9fb0bded3578113c738060290403ed472", "title": "Extending explicit shape regression with mixed feature channels and pose priors", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2014, "pdf": "https://infoscience.epfl.ch/record/200452/files/wacv2014-RGE.pdf"}, {"id": "0c59071ddd33849bd431165bc2d21bbe165a81e0", "title": "Person Recognition in Personal Photo Collections", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf"}, {"id": "4223917177405eaa6bdedca061eb28f7b440ed8e", "title": "B-spline Shape from Motion & Shading: An Automatic Free-form Surface Modeling for Face Reconstruction", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4223/917177405eaa6bdedca061eb28f7b440ed8e.pdf"}, {"id": "90ad0daa279c3e30b360f9fe9371293d68f4cebf", "title": "Spatio-temporal Framework and Algorithms for Video-based Face Recognition", "addresses": [{"address": "Multimedia University", 
"lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/90ad/0daa279c3e30b360f9fe9371293d68f4cebf.pdf"}, {"id": "c675534be881e59a78a5986b8fb4e649ddd2abbe", "title": "Face recognition by landmark pooling-based CNN with concentrate loss", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296548"}, {"id": "561ae67de137e75e9642ab3512d3749b34484310", "title": "DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning", "addresses": [{"address": "Rheinische-Friedrich-Wilhelms University", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}, {"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf"}, {"id": "39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc", "title": "Simultaneous Local Binary Feature Learning and Encoding for Face Recognition", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://openaccess.thecvf.com/content_iccv_2015/papers/Lu_Simultaneous_Local_Binary_ICCV_2015_paper.pdf"}, {"id": "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "title": "Ring loss: Convex Feature Normalization for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00130.pdf"}, {"id": "b1fdd4ae17d82612cefd4e78b690847b071379d3", "title": "Supervised Descent Method", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/4fc5/416b6c7173d3462e5be796bda3ad8d5645a1.pdf"}, {"id": "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "title": "Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions", "addresses": [{"address": "University of Copenhagen", "lat": "55.68015020", "lng": "12.57232700", "type": "edu"}, {"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf"}, {"id": "1921795408345751791b44b379f51b7dd54ebfa2", "title": "From Face Recognition to Models of Identity: A Bayesian Approach to Learning About Unknown Identities from Unsupervised Data", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07872.pdf"}, {"id": "f05ad40246656a977cf321c8299158435e3f3b61", "title": "Face Recognition Using Face Patch Networks", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Lu_Face_Recognition_Using_2013_ICCV_paper.pdf"}, {"id": "5134353bd01c4ea36bd007c460e8972b1541d0ad", "title": "Face Recognition with Multi-Resolution Spectral Feature Images", "addresses": [{"address": "Nanyang Technological 
University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "Anhui University", "lat": "31.76909325", "lng": "117.17795091", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/5134/353bd01c4ea36bd007c460e8972b1541d0ad.pdf"}, {"id": "29db046dd1f8100b279c3f5f5c5ef19bdbf5af9a", "title": "Recent Progress of Face Image Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04717.pdf"}, {"id": "940e5c45511b63f609568dce2ad61437c5e39683", "title": "Fiducial Facial Point Extraction Using a Novel Projective Invariant", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/TIP.2015.2390976"}, {"id": "785eeac2e236a85a45b4e0356c0745279c31e089", "title": "Learning Person-Specific Representations From Faces in the Wild", "addresses": [{"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}, {"address": "State University of Campinas", "lat": "-22.81377650", "lng": "-47.06400040", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIFS.2014.2359543"}, {"id": "9901f473aeea177a55e58bac8fd4f1b086e575a4", "title": "Human and sheep facial landmarks localisation by triplet interpolated features", "addresses": [{"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1509.04954.pdf"}, {"id": "e1449be4951ba7519945cd1ad50656c3516113da", "title": "Local Gradient Hexa Pattern: A Descriptor for Face Recognition and Retrieval", "addresses": [{"address": "IIIT Allahabad, India", "lat": "25.42991140", "lng": "81.77118270", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TCSVT.2016.2603535"}, {"id": "9989ad33b64accea8042e386ff3f1216386ba7f1", "title": "Facial feature extraction method based on shallow and deep fusion CNN", "addresses": [{"address": "Guilin University of Electronic Technology Guangxi Guilin", "lat": "25.28739920", "lng": "110.33242770", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320"}, {"id": "8efda5708bbcf658d4f567e3866e3549fe045bbb", "title": "Pre-trained Deep Convolutional Neural Networks for Face Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf"}, {"id": "3b092733f428b12f1f920638f868ed1e8663fe57", "title": "On the size of Convolutional Neural Networks and generalization performance", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf"}, {"id": "55966926e7c28b1eee1c7eb7a0b11b10605a1af0", "title": "Surpassing Human-Level Face Verification Performance on LFW with GaussianFace", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf"}, {"id": "e6d6203fa911429d76f026e2ec2de260ec520432", "title": "Siamese network features for image matching", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": 
"25.46632601", "type": "edu"}, {"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899663"}, {"id": "4d3c4c3fe8742821242368e87cd72da0bd7d3783", "title": "Hybrid Deep Learning for Face Verification", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTiccv13.pdf"}, {"id": "9af9a88c60d9e4b53e759823c439fc590a4b5bc5", "title": "Learning Deep Convolutional Embeddings for Face Representation Using Joint Sample- and Set-Based Supervision", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.00277.pdf"}, {"id": "939123cf21dc9189a03671484c734091b240183e", "title": "Within- and cross- database evaluations for face gender classification via befit protocols", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2014, "pdf": "http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf"}, {"id": "061e29eae705f318eee703b9e17dc0989547ba0c", "title": "Enhancing Expression Recognition in the Wild with Unlabeled Reference Data", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/061e/29eae705f318eee703b9e17dc0989547ba0c.pdf"}, {"id": "435642641312364e45f4989fac0901b205c49d53", "title": "Face Model Compression by Distilling Knowledge from Neurons", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4356/42641312364e45f4989fac0901b205c49d53.pdf"}, {"id": "80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7", "title": "Learning Kernel Extended Dictionary for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TNNLS.2016.2522431"}, {"id": "edbddf8c176d6e914f0babe64ad56c051597d415", "title": "Predicting Image Memorability Through Adaptive Transfer Learning From External Sources", "addresses": [{"address": "Shandong University", "lat": "36.36934730", "lng": "120.67381800", "type": "edu"}, {"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TMM.2016.2644866"}, {"id": "18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae", "title": "Learning invariant representations and applications to face verification", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}, {"address": "McGovern Institute for Brain Research", "lat": "42.36262950", "lng": "-71.09144810", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/18c6/c92c39c8a5a2bb8b5673f339d3c26b8dcaae.pdf"}, {"id": "37c8514df89337f34421dc27b86d0eb45b660a5e", "title": "Facial Landmark Tracking by Tree-Based Deformable Part Model Based Detector", "addresses": [{"address": "Czech Technical 
University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Uricar_Facial_Landmark_Tracking_ICCV_2015_paper.pdf"}, {"id": "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "title": "Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource Constraints", "addresses": [{"address": "Indiana University", "lat": "39.86948105", "lng": "-84.87956905", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09900.pdf"}, {"id": "96e1ccfe96566e3c96d7b86e134fa698c01f2289", "title": "Semi-adversarial Networks: Convolutional Autoencoders for Imparting Privacy to Face Images", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1712.00321.pdf"}, {"id": "d78fbd11f12cbc194e8ede761d292dc2c02d38a2", "title": "Enhancing Gray Scale Images for Face Detection under Unstable Lighting Condition", "addresses": [{"address": "University of Dschang", "lat": "5.44094480", "lng": "10.07120561", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d78f/bd11f12cbc194e8ede761d292dc2c02d38a2.pdf"}, {"id": "86d0127e1fd04c3d8ea78401c838af621647dc95", "title": "A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction", "addresses": [{"address": "Hunan University", "lat": "26.88111275", "lng": "112.62850666", "type": "edu"}, {"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.02810.pdf"}, {"id": "103a7c3eba36792886ae8005f6492332e6b05bad", "title": "Facial Recognition with Encoded Local Projections", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.06218.pdf"}, {"id": "35d42f4e7a1d898bc8e2d052c38e1106f3e80188", "title": "Human and algorithm performance on the PaSC face Recognition Challenge", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358765"}, {"id": "06d7ef72fae1be206070b9119fb6b61ce4699587", "title": "On One-Shot Similarity Kernels: Explicit Feature Maps and Properties", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}, {"address": "University of Patras", "lat": "38.28994820", "lng": "21.78864690", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zafeiriou_On_One-Shot_Similarity_2013_ICCV_paper.pdf"}, {"id": "c17c7b201cfd0bcd75441afeaa734544c6ca3416", "title": "Layerwise Class-Aware Convolutional Neural Network", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Southeast University", "lat": "32.05752790", "lng": 
"118.78682252", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TCSVT.2016.2587389"}, {"id": "e6da1fcd2a8cda0c69b3d94812caa7d844903007", "title": "Sonicdoor: scaling person identification with ultrasonic sensors by novel modeling of shape, behavior and walking patterns", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3137154"}, {"id": "cd74d606e76ecddee75279679d9770cdc0b49861", "title": "Transfer Learning of Structured Representation for Face Recognition", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2365725"}, {"id": "2e58ec57d71b2b2a3e71086234dd7037559cc17e", "title": "A Gender Recognition System from Facial Image", "addresses": [{"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}, {"address": "University of Dhaka", "lat": "23.73169570", "lng": "90.39652750", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2e58/ec57d71b2b2a3e71086234dd7037559cc17e.pdf"}, {"id": "8f99f7ccb85af6d4b9e015a9b215c529126e7844", "title": "Face image-based age and gender estimation with consideration of ethnic difference", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ROMAN.2017.8172359"}, {"id": "4b605e6a9362485bfe69950432fa1f896e7d19bf", "title": "A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf"}, {"id": "2af2b74c3462ccff3a6881ff7cf4f321b3242fa9", "title": "Name-Face Association in Web Videos: A Large-Scale Dataset, Baselines, and Open Issues", "addresses": [{"address": "City University of Hong Kong", "lat": "22.34000115", "lng": "114.16970291", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": 2014, "pdf": "http://yugangjiang.info/publication/JCST-nameface.pdf"}, {"id": "9ff931ca721d50e470e1a38e583c7b18b6cdc2cc", "title": "An Overview and Empirical Comparison of Distance Metric Learning Methods", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407637"}, {"id": "578117ff493d691166fefc52fd61bad70d8752a9", "title": "Dealing with occlusions in face recognition by region-based fusion", "addresses": [{"address": "Universidad Autonoma de Madrid", "lat": "40.48256135", "lng": "-3.69060790", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/CCST.2016.7815707"}, {"id": "3cea3aba77649d718991d0cb30135887267c11e8", "title": "Adversarial Attack Type I: Generating False Positives", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.00594.pdf"}, {"id": "053931267af79a89791479b18d1b9cde3edcb415", "title": "Attributes for Improved 
Attributes: A Multi-Task Network Utilizing Implicit and Explicit Relationships for Facial Attribute Classification", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/0539/31267af79a89791479b18d1b9cde3edcb415.pdf"}, {"id": "02467703b6e087799e04e321bea3a4c354c5487d", "title": "Grouper: Optimizing Crowdsourced Face Annotations", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27"}, {"id": "afe9cfba90d4b1dbd7db1cf60faf91f24d12b286", "title": "Principal Directions of Synthetic Exact Filters for Robust Real-Time Eye Localization", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/afe9/cfba90d4b1dbd7db1cf60faf91f24d12b286.pdf"}, {"id": "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "title": "Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/SSCI.2015.37"}, {"id": "0ce3a786aed896d128f5efdf78733cc675970854", "title": "Learning the Face Prior for Bayesian Face Recognition", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3689/2b6bb4848a9c21158b8eded7f14a6654dd7e.pdf"}, {"id": "9fc993aeb0a007ccfaca369a9a8c0ccf7697261d", "title": "Context-Aware Local Binary Feature Learning for Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534"}, {"id": "746c0205fdf191a737df7af000eaec9409ede73f", "title": "Investigating Nuisances in DCNN-Based Face Recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119"}, {"id": "3802c97f925cb03bac91d9db13d8b777dfd29dcc", "title": "Non-parametric Bayesian Constrained Local Models", "addresses": [{"address": "Institute of Systems and Robotics", "lat": "53.83383710", "lng": "10.70359390", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.232"}, {"id": "5957936195c10521dadc9b90ca9b159eb1fc4871", "title": "LBP-ferns-based feature extraction for robust facial recognition", "addresses": [{"address": "Korea University", "lat": "37.59014110", "lng": "127.03623180", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TCE.2016.7838098"}, {"id": "d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1", "title": "Constrained Metric Learning by Permutation Inducing Isometries", "addresses": [{"address": "Qatar University", "lat": "25.37461295", "lng": "51.48980354", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2015.2502144"}, {"id": "79db191ca1268dc88271abef3179c4fe4ee92aed", "title": "Facial Expression 
Based Automatic Album Creation", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/79db/191ca1268dc88271abef3179c4fe4ee92aed.pdf"}, {"id": "3983370efe7a7521bde255017171724d845b3383", "title": "Learning Discriminators as Energy Networks in Adversarial Learning", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}, {"address": "University of Iowa", "lat": "41.66590000", "lng": "-91.57310307", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.01152.pdf"}, {"id": "faf19885431cb39360158982c3a1127f6090a1f6", "title": "Inheritable Fisher vector feature for kinship verification", "addresses": [{"address": "New Jersey Institute of Technology", "lat": "40.74230250", "lng": "-74.17928172", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358768"}, {"id": "c4f3185f010027a0a97fcb9753d74eb27a9cfd3e", "title": "Learning to classify gender from four million images", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1016/j.patrec.2015.02.006"}, {"id": "b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef", "title": "Approximate radial gradient transform based face recognition", "addresses": [{"address": "Mangalore University", "lat": "12.81608485", "lng": "74.92449278", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICACCI.2015.7275752"}, {"id": "5e0e516226413ea1e973f1a24e2fdedde98e7ec0", "title": "The Invariance Hypothesis and the Ventral Stream", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/74ce/97da57ec848db660ee69dec709f226c74f43.pdf"}, {"id": "d8896861126b7fd5d2ceb6fed8505a6dff83414f", "title": "In-plane Rotational Alignment of Faces by Eye and Eye-pair Detection", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf"}, {"id": "70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e", "title": "Elastic preserving projections based on L1-norm maximization", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-5608-2"}, {"id": "b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8", "title": "HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf"}, {"id": "148eb413bede35487198ce7851997bf8721ea2d6", "title": "People Search in Surveillance Videos", "addresses": [{"address": "IBM Research, North Carolina", "lat": "35.90422720", "lng": "-78.85565763", "type": "company"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/148e/b413bede35487198ce7851997bf8721ea2d6.pdf"}, {"id": "e3e2c106ccbd668fb9fca851498c662add257036", 
"title": "Appearance, context and co-occurrence ensembles for identity recognition in personal photo collections", "addresses": [{"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2013, "pdf": "http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf"}, {"id": "d22b378fb4ef241d8d210202893518d08e0bb213", "title": "Random Faces Guided Sparse Many-to-One Encoder for Pose-Invariant Face Recognition", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Random_Faces_Guided_2013_ICCV_paper.pdf"}, {"id": "dbb9601a1d2febcce4c07dd2b819243d81abb2c2", "title": "Landmark Free Face Attribute Prediction", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "SAP Innovation Center Network, Singapore", "lat": "1.27486000", "lng": "103.79778700", "type": "company"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361884"}, {"id": "8633732d9f787f8497c2696309c7d70176995c15", "title": "Multi-objective convolutional learning for face labeling", "addresses": [{"address": "Baidu Research, USA", "lat": "37.40922650", "lng": "-122.02366150", "type": "company"}, {"address": "University of California, Merced", "lat": "37.36566745", "lng": "-120.42158888", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298967"}, {"id": "7fb5006b6522436ece5bedf509e79bdb7b79c9a7", "title": "Multi-Task Convolutional Neural Network for Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf"}, {"id": "368e99f669ea5fd395b3193cd75b301a76150f9d", "title": "One-to-many face recognition with bilinear CNNs", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1506.01342.pdf"}, {"id": "c18a03568d4b512a0d8380cbb1fbf6bd56d11f05", "title": "A Wearable IoT with Complex Artificial Perception Embedding for Alzheimer Patients", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8430403"}, {"id": "a6b5ffb5b406abfda2509cae66cdcf56b4bb3837", "title": "One Shot Similarity Metric Learning for Action Recognition", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}, {"address": "Open University", "lat": "52.02453775", "lng": "-0.70927481", "type": "edu"}, {"address": "Weizmann Institute of Science", "lat": "31.90784990", "lng": "34.81334092", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/bce2/02717ce134b317b39f0a18151659d643875b.pdf"}, {"id": "4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a", "title": "Video-Based Face Recognition Using the Intra/Extra-Personal Difference Dictionary", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2014, "pdf": 
"http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf"}, {"id": "c07ab025d9e3c885ad5386e6f000543efe091c4b", "title": "Preserving Model Privacy for Machine Learning in Distributed Systems", "addresses": [{"address": "Binghamton University", "lat": "42.09580770", "lng": "-75.91455689", "type": "edu"}, {"address": "University of Florida", "lat": "29.63287840", "lng": "-82.34901330", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302601"}, {"id": "5798055e11e25c404b1b0027bc9331bcc6e00555", "title": "PDSS: patch-descriptor-similarity space for effective face verification", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": 2012, "pdf": "http://doi.acm.org/10.1145/2393347.2396357"}, {"id": "c43ed9b34cad1a3976bac7979808eb038d88af84", "title": "Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03675.pdf"}, {"id": "c98983592777952d1751103b4d397d3ace00852d", "title": "Face Synthesis from Facial Identity Features", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf"}, {"id": "48174c414cfce7f1d71c4401d2b3d49ba91c5338", "title": "Robust Performance-driven 3D Face Tracking in Long Range Depth Scenes", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4817/4c414cfce7f1d71c4401d2b3d49ba91c5338.pdf"}, {"id": "c2e03efd8c5217188ab685e73cc2e52c54835d1a", "title": "Deep tree-structured face: A unified representation for multi-task facial biometrics", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477585"}, {"id": "21959bc56a160ebd450606867dce1462a913afab", "title": "Face recognition based on manifold constrained joint sparse sensing with K-SVD", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}, {"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}, {"address": "Shanghai University", "lat": "31.32235655", "lng": "121.38400941", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-6071-9"}, {"id": "06518858bd99cddf9bc9200fac5311fc29ac33b4", "title": "Sparse Low-Rank Component-Based Representation for Face Recognition With Low-Quality Images", "addresses": [{"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}, {"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}], "year": "2019", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392777"}, {"id": 
"f1d6da83dcf71eda45a56a86c5ae13e7f45a8536", "title": "A Secure Face-Verification Scheme Based on Homomorphic Encryption and Deep Neural Networks", "addresses": [{"address": "Beijing University of Technology", "lat": "39.87391435", "lng": "116.47722285", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2737544"}, {"id": "337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958", "title": "Data-specific Adaptive Threshold for Face Recognition and Authentication", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11160.pdf"}, {"id": "4d16337cc0431cd43043dfef839ce5f0717c3483", "title": "A Scalable and Privacy-Aware IoT Service for Live Video Analytics", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4d16/337cc0431cd43043dfef839ce5f0717c3483.pdf"}, {"id": "1d696a1beb42515ab16f3a9f6f72584a41492a03", "title": "Deeply learned face representations are sparse, selective, and robust", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTcvpr15.pdf"}, {"id": "683ec608442617d11200cfbcd816e86ce9ec0899", "title": "Dual Linear Regression Based Classification for Face Cluster Recognition", "addresses": [{"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.342"}, {"id": "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "title": "Face , Age and Gender Recognition using Local Descriptors", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf"}, {"id": "25d514d26ecbc147becf4117512523412e1f060b", "title": "Annotated crowd video face database", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139083"}, {"id": "e40cb4369c6402ae53c81ce52b73df3ef89f578b", "title": "Facial image clustering in stereoscopic videos using double spectral analysis", "addresses": [{"address": "Aristotle University of Thessaloniki", "lat": "40.62984145", "lng": "22.95889350", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1016/j.image.2015.01.009"}, {"id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf"}, {"id": "4ea4116f57c5d5033569690871ba294dc3649ea5", "title": "Multi-View Face Alignment Using 3D Shape Model for View Estimation", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/4ea4/116f57c5d5033569690871ba294dc3649ea5.pdf"}, {"id": "7cffcb4f24343a924a8317d560202ba9ed26cd0b", "title": "The 
unconstrained ear recognition challenge", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}, {"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}, {"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06997.pdf"}, {"id": "185263189a30986e31566394680d6d16b0089772", "title": "Efficient Annotation of Objects for Video Analysis", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf"}, {"id": "5da827fe558fb2e1124dcc84ef08311241761726", "title": "Attribute preserved face de-identification", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139096"}, {"id": "1de23d7fe718d9fab0159f58f422099e44ad3f0a", "title": "Locality Preserving Collaborative Representation for Face Recognition", "addresses": [{"address": "Xiamen University", "lat": "24.43994190", "lng": "118.09301781", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s11063-016-9558-2"}, {"id": "4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e", "title": "Deep Density Clustering of Unconstrained Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf"}, {"id": "5bb53fb36a47b355e9a6962257dd465cd7ad6827", "title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays", "addresses": [{"address": "North Carolina Central University", "lat": "35.97320905", "lng": "-78.89755054", "type": "edu"}, {"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5bb5/3fb36a47b355e9a6962257dd465cd7ad6827.pdf"}, {"id": "b18858ad6ec88d8b443dffd3e944e653178bc28b", "title": "Trojaning Attack on Neural Networks", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf"}, {"id": "ee7093e91466b81d13f4d6933bcee48e4ee63a16", "title": "Discovering Person Identity via Large-Scale Observations", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ee70/93e91466b81d13f4d6933bcee48e4ee63a16.pdf"}, {"id": "687e17db5043661f8921fb86f215e9ca2264d4d2", "title": "A robust elastic and partial matching metric for face recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2009, "pdf": "http://www.ece.northwestern.edu/~ganghua/publication/ICCV09a.pdf"}, {"id": "fffe5ab3351deab81f7562d06764551422dbd9c4", "title": "Fully automated facial picture evaluation using high level attributes", "addresses": [{"address": "GIPSA-Lab, Grenoble, France", "lat": "45.19292450", "lng": "5.76619830", "type": "edu"}], "year": 
"2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163114"}, {"id": "1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3", "title": "Robust Face Recognition with Deep Multi-View Representation Learning", "addresses": [{"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2984061"}, {"id": "870433ba89d8cab1656e57ac78f1c26f4998edfb", "title": "Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163"}, {"id": "e3a6e5a573619a97bd6662b652ea7d088ec0b352", "title": "Compare and Contrast: Learning Prominent Visual Differences", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.00112.pdf"}, {"id": "60737db62fb5fab742371709485e4b2ddf64b7b2", "title": "Crowdsourced Selection on Multi-Attribute Data", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3132847.3132891"}, {"id": "288964068cd87d97a98b8bc927d6e0d2349458a2", "title": "Mean-Variance Loss for Deep Age Estimation from a Face", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf"}, {"id": "59d45281707b85a33d6f50c6ac6b148eedd71a25", "title": "Rank Minimization across Appearance and Shape for AAM Ensemble Fitting", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cheng_Rank_Minimization_across_2013_ICCV_paper.pdf"}, {"id": "818ecc8c8d4dc398b01a852df90cb8d972530fa5", "title": "Unsupervised Training for 3D Morphable Model Regression", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06098.pdf"}, {"id": "d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8", "title": "A Novel Two-stage Learning Pipeline for Deep Neural Networks", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11063-017-9578-6"}, {"id": "4b3f425274b0c2297d136f8833a31866db2f2aec", "title": "Toward Open-Set Face Recognition", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.85"}, {"id": "12003a7d65c4f98fb57587fd0e764b44d0d10125", "title": "Face recognition in the wild with the Probabilistic Gabor-Fisher Classifier", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", 
"lng": "14.46907327", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284835"}, {"id": "7a09e8f65bd85d4c79f0ae90d4e2685869a9894f", "title": "Face and Hair Region Labeling Using Semi-Supervised Spectral Clustering-Based Multiple Segmentations", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}, {"address": "Korea Institute of Oriental Medicine", "lat": "36.39918400", "lng": "127.39465600", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TMM.2016.2551698"}, {"id": "2bcd9b2b78eb353ea57cf50387083900eae5384a", "title": "Image ranking and retrieval based on multi-attribute queries", "addresses": [{"address": "IBM Thomas J. Watson Research Center", "lat": "41.21002475", "lng": "-73.80407056", "type": "company"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995329"}, {"id": "77c5437107f8138d48cb7e10b2b286fa51473678", "title": "A pseudo ensemble convolutional neural networks", "addresses": [{"address": "Electronics and Telecommunications Research Institute, Daejeon, Korea", "lat": "36.38376500", "lng": "127.36694000", "type": "edu"}, {"address": "University of Science and Technology, Korea", "lat": "36.38513950", "lng": "127.36834130", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/URAI.2016.7734005"}, {"id": "872dfdeccf99bbbed7c8f1ea08afb2d713ebe085", "title": "L2-constrained Softmax Loss for Discriminative Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.09507.pdf"}, {"id": "57ca530e9acb63487e8591cb6efb89473aa1e5b4", "title": "Multilayer Surface Albedo for Face Recognition With Reference Images in Bad Lighting Conditions", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2356292"}, {"id": "511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7", "title": "A Community Detection Approach to Cleaning Extremely Large Face Database", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf"}, {"id": "a8748a79e8d37e395354ba7a8b3038468cb37e1f", "title": "Seeing the Forest from the Trees: A Holistic Approach to Near-Infrared Heterogeneous Face Recognition", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.47"}, {"id": "be28ed1be084385f5d389db25fd7f56cd2d7f7bf", "title": "Exploring computation-communication tradeoffs in camera systems", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.03864.pdf"}, {"id": "0e652a99761d2664f28f8931fee5b1d6b78c2a82", "title": "Making a Science of Model Search", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2012, "pdf": 
"http://pdfs.semanticscholar.org/0e65/2a99761d2664f28f8931fee5b1d6b78c2a82.pdf"}, {"id": "6604fd47f92ce66dd0c669dd66b347b80e17ebc9", "title": "Simultaneous Cascaded Regression", "addresses": [{"address": "Institute of Systems and Robotics", "lat": "53.83383710", "lng": "10.70359390", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/6604/fd47f92ce66dd0c669dd66b347b80e17ebc9.pdf"}, {"id": "46e72046a9bb2d4982d60bcf5c63dbc622717f0f", "title": "Learning Discriminative Features with Class Encoder", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1605.02424.pdf"}, {"id": "2a92bda6dbd5cce5894f7d370d798c07fa8783f4", "title": "Class-Specific Kernel Fusion of Multiple Descriptors for Face Verification Using Multiscale Binarised Statistical Image Features", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIFS.2014.2359587"}, {"id": "52bf00df3b970e017e4e2f8079202460f1c0e1bd", "title": "Learning High-level Prior with Convolutional Neural Networks for Semantic Segmentation", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/52bf/00df3b970e017e4e2f8079202460f1c0e1bd.pdf"}, {"id": "0081e2188c8f34fcea3e23c49fb3e17883b33551", "title": "Training Deep Face Recognition Systems with Synthetic Data", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf"}, {"id": "2201f187a7483982c2e8e2585ad9907c5e66671d", "title": "Joint Face Alignment and 3D Face Reconstruction", "addresses": [{"address": "Sichuan University, Chengdu", "lat": "30.64276900", "lng": "104.06751175", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1cad/9aa5095733b56e998ad0cd396e89c2bc9928.pdf"}, {"id": "4bd3de97b256b96556d19a5db71dda519934fd53", "title": "Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition", "addresses": [{"address": "South China University of Technology", "lat": "23.05020420", "lng": "113.39880323", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529"}, {"id": "831d661d657d97a07894da8639a048c430c5536d", "title": "Weakly Supervised Facial Analysis with Dense Hyper-Column Features", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.19"}, {"id": "0d746111135c2e7f91443869003d05cde3044beb", "title": "Partial face detection for continuous authentication", "addresses": [{"address": "State University of New Jersey", "lat": 
"40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532908"}, {"id": "3bd10f7603c4f5a4737c5613722124787d0dd818", "title": "An Efficient Joint Formulation for Bayesian Face Verification", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415949"}, {"id": "38a2661b6b995a3c4d69e7d5160b7596f89ce0e6", "title": "Randomized Intraclass-Distance Minimizing Binary Codes for face recognition", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2014, "pdf": "http://www.cs.colostate.edu/~draper/papers/zhang_ijcb14.pdf"}, {"id": "aa1129780cc496918085cd0603a774345c353c54", "title": "Evolutionary Cost-Sensitive Discriminative Learning With Application to Vision and Olfaction", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}, {"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7779010"}, {"id": "0de91641f37b0a81a892e4c914b46d05d33fd36e", "title": "RAPS: Robust and Efficient Automatic Construction of Person-Specific Deformable Models", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2014, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/raps.pdf"}, {"id": "2e091b311ac48c18aaedbb5117e94213f1dbb529", "title": "Collaborative Facial Landmark Localization for Transferring Annotations Across Datasets", "addresses": [{"address": "University of Wisconsin Madison", "lat": "43.07982815", "lng": "-89.43066425", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b1a1/a049f1d78f6e3d072236237c467292ccd537.pdf"}, {"id": "00075519a794ea546b2ca3ca105e2f65e2f5f471", "title": "Generating a Large, Freely-Available Dataset for Face-Related Algorithms", "addresses": [{"address": "Amherst College", "lat": "42.37289000", "lng": "-72.51881400", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/0007/5519a794ea546b2ca3ca105e2f65e2f5f471.pdf"}, {"id": "d6791b98353aa113d79f6fb96335aa6c7ea3b759", "title": "Collaborative Random Faces-Guided Encoders for Pose-Invariant Face Representation Learning", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}, {"address": "University of Massachusetts Dartmouth", "lat": "41.62772475", "lng": "-71.00724501", "type": "edu"}, {"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TNNLS.2017.2648122"}, {"id": "7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794", "title": "Markov Chain Monte Carlo for Automated Face Image Analysis", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s11263-016-0967-5"}, {"id": "1860b8f63ce501bd0dfa9e6f2debc080e88d9baa", "title": 
"Local Large-Margin Multi-Metric Learning for Face and Kinship Verification", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7894195"}, {"id": "7a131fafa7058fb75fdca32d0529bc7cb50429bd", "title": "Beyond Face Rotation: Global and Local Perception GAN for Photorealistic and Identity Preserving Frontal View Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.04086.pdf"}, {"id": "2cd7821fcf5fae53a185624f7eeda007434ae037", "title": "Exploring the geo-dependence of human face appearance", "addresses": [{"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": 2014, "pdf": "http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf"}, {"id": "270acff7916589a6cc9ca915b0012ffcb75d4899", "title": "On the Applications of Robust PCA in Image and Video Processing", "addresses": [{"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}, {"address": "University of Warwick", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu"}, {"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8425659"}, {"id": "588bed36b3cc9e2f26c39b5d99d6687f36ae1177", "title": "Sparsely Encoded Local Descriptor for face recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Chinese Academy of Science", "lat": "39.90419990", "lng": "116.40739630", "type": "edu"}], "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389"}, {"id": "8bdf6f03bde08c424c214188b35be8b2dec7cdea", "title": "Inference Attacks Against Collaborative Learning", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04049.pdf"}, {"id": "09b43b59879d59493df2a93c216746f2cf50f4ac", "title": "Deep Transfer Metric Learning", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_036_ext.pdf"}, {"id": "6342a4c54835c1e14159495373ab18b4233d2d9b", "title": "Towards Pose-robust Face Recognition on Video", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf"}, {"id": "34108098e1a378bc15a5824812bdf2229b938678", "title": "Reconstructive Sparse Code Transfer for Contour Detection and Semantic Labeling", "addresses": [{"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3410/8098e1a378bc15a5824812bdf2229b938678.pdf"}, {"id": "1fd6004345245daf101c98935387e6ef651cbb55", "title": "Learning Symmetry Features for Face 
Detection Based on Sparse Group Lasso", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1fd6/004345245daf101c98935387e6ef651cbb55.pdf"}, {"id": "5dc52c64991c655a12936867594326cf6352eb8e", "title": "Constructing Local Binary Pattern Statistics by Soft Voting", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/5dc5/2c64991c655a12936867594326cf6352eb8e.pdf"}, {"id": "2c424f21607ff6c92e640bfe3da9ff105c08fac4", "title": "Learning Structured Output Representation using Deep Conditional Generative Models", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3f25/e17eb717e5894e0404ea634451332f85d287.pdf"}, {"id": "241d2c517dbc0e22d7b8698e06ace67de5f26fdf", "title": "Online, Real-Time Tracking Using a Category-to-Individual Detector", "addresses": [{"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/bfc3/546fa119443fdcbac3a5723647c2ba0007ac.pdf"}, {"id": "13901473a12061f080b9d54219f16db7d406e769", "title": "High-Order Local Spatial Context Modeling by Spatialized Random Forest", "addresses": [{"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2012.2222895"}, {"id": "3f5e8f884e71310d7d5571bd98e5a049b8175075", "title": "Making a Science of Model Search: Hyperparameter Optimization in Hundreds of Dimensions for Vision Architectures", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/3f5e/8f884e71310d7d5571bd98e5a049b8175075.pdf"}, {"id": "a2b4a6c6b32900a066d0257ae6d4526db872afe2", "title": "Learning Face Image Quality From Human Assessments", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466"}, {"id": "99facca6fc50cc30f13b7b6dd49ace24bc94f702", "title": "VIPLFaceNet: an open source deep face recognition SDK", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.03892.pdf"}, {"id": "d0dd1364411a130448517ba532728d5c2fe78ed9", "title": "On-line machine learning accelerator on digital RRAM-crossbar", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ISCAS.2016.7527183"}, {"id": "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "title": "Displacement Template with Divide-&-Conquer Algorithm for Significantly 
Improving Descriptor Based Face Recognition Approaches", "addresses": [{"address": "Aberystwyth University", "lat": "52.41073580", "lng": "-4.05295501", "type": "edu"}, {"address": "University of Northern British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf"}, {"id": "353b6c1f431feac6edde12b2dde7e6e702455abd", "title": "Multi-scale Patch Based Collaborative Representation for Face Recognition with Margin Distribution Optimization", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}, {"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8835/c80f8ad8ebd05771a9bce5a8637efbc4c8e3.pdf"}, {"id": "4377b03bbee1f2cf99950019a8d4111f8de9c34a", "title": "Selective Encoding for Recognizing Unreliably Localized Faces", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://www.umiacs.umd.edu/~morariu/publications/LiSelectiveEncoderICCV15.pdf"}, {"id": "120bcc9879d953de7b2ecfbcd301f72f3a96fb87", "title": "Report on the FG 2015 Video Person Recognition Evaluation", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}, {"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}, {"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2015, "pdf": "http://www.cs.colostate.edu/~vision/pasc/docs/fg2015videoEvalPreprint.pdf"}, {"id": "e00d4e4ba25fff3583b180db078ef962bf7d6824", "title": "Face Verification with Multi-Task and Multi-Scale Features Fusion", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e00d/4e4ba25fff3583b180db078ef962bf7d6824.pdf"}, {"id": "5bde1718253ec28a753a892b0ba82d8e553b6bf3", "title": "Variational Relevance Vector Machine for Tabular Data", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}, {"address": "Lomonosov Moscow State University", "lat": "55.70229715", "lng": "37.53179777", "type": "edu"}, {"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/5bde/1718253ec28a753a892b0ba82d8e553b6bf3.pdf"}, {"id": "4349f17ec319ac8b25c14c2ec8c35f374b958066", "title": "Dynamic Texture Comparison Using Derivative Sparse Representation: Application to Video-Based Face Recognition", "addresses": [{"address": "Tafresh University", "lat": "34.68092465", 
"lng": "50.05341352", "type": "edu"}, {"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}, {"address": "Griffith University", "lat": "-27.55339750", "lng": "153.05336234", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/THMS.2017.2681425"}, {"id": "b859d1fc1a7ad756815490527319d458fa9af3d2", "title": "Learning Structure and Strength of CNN Filters for Small Sample Size Training", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11405.pdf"}, {"id": "122f52fadd4854cf6c9287013520eced3c91e71a", "title": "Robust Point Set Matching for Partial Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2515987"}, {"id": "771505abd38641454757de75fe751d41e87f89a4", "title": "Learning structured sparse representation for single sample face recognition", "addresses": [{"address": "HoHai University", "lat": "32.05765485", "lng": "118.75500040", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "Nantong University", "lat": "31.97474630", "lng": "120.90779264", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401561"}, {"id": "ac48ecbc7c3c1a7eab08820845d47d6ce197707c", "title": "Iterative Re-Constrained Group Sparse Face Recognition With Adaptive Weights Learning", "addresses": [{"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2681841"}, {"id": "09f58353e48780c707cf24a0074e4d353da18934", "title": "Unconstrained face recognition: Establishing baseline human performance via crowdsourcing", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/BestrowdenBishtKlontzJain_CrowdsourcingHumanPeformance_IJCB2014.pdf"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "0106a2f6251dc9ffc90709c6f0d9b54c1e82326b", "title": "Applying scattering operators for face recognition: A comparative study", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}, {"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2012, "pdf": "http://www.iis.sinica.edu.tw/papers/song/14922-A.pdf"}, {"id": "0ad8149318912b5449085187eb3521786a37bc78", "title": "CP-mtML: Coupled Projection 
Multi-Task Metric Learning for Large Scale Face Retrieval", "addresses": [{"address": "University of Caen", "lat": "35.02749960", "lng": "135.78154513", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/abs/1604.02975"}, {"id": "bc27434e376db89fe0e6ef2d2fabc100d2575ec6", "title": "Faceless Person Recognition; Privacy Implications in Social Media", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1607.08438.pdf"}, {"id": "83295bce2340cb87901499cff492ae6ff3365475", "title": "Deep Multi-Center Learning for Face Alignment", "addresses": [{"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}, {"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.01558.pdf"}, {"id": "5c4f9260762a450892856b189df240f25b5ed333", "title": "Discriminative Elastic-Net Regularized Linear Regression", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}, {"address": "University of East Anglia", "lat": "52.62215710", "lng": "1.24091360", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2651396"}, {"id": "57ebeff9273dea933e2a75c306849baf43081a8c", "title": "Deep Convolutional Network Cascade for Facial Point Detection", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sun_Deep_Convolutional_Network_2013_CVPR_paper.pdf"}, {"id": "9ce97efc1d520dadaa0d114192ca789f23442727", "title": "Teaching Computer Vision: Bringing Research Benchmarks to the Classroom", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2597627"}, {"id": "4793f11fbca4a7dba898b9fff68f70d868e2497c", "title": "Kinship Verification through Transfer Learning", "addresses": [{"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf"}, {"id": "08d55271589f989d90a7edce3345f78f2468a7e0", "title": "Quality Aware Network for Set to Set Recognition", "addresses": [{"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1704.03373v1.pdf"}, {"id": "3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c", "title": "Is block matching an alternative tool to LBP for face recognition?", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025145"}, {"id": "651cafb2620ab60a0e4f550c080231f20ae6d26e", "title": "4D unconstrained real-time face recognition using a commodity depth camera", "addresses": [{"address": "Singapore University of Technology and Design", "lat": "1.34021600", "lng": "103.96508900", "type": "edu"}, {"address": "University of Groningen", "lat": "53.21967825", "lng": 
"6.56251482", "type": "edu"}], "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6360717"}, {"id": "56e25358ebfaf8a8b3c7c33ed007e24f026065d0", "title": "V-shaped interval insensitive loss for ordinal classification", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1007/s10994-015-5541-9"}, {"id": "ce37e11f4046a4b766b0e3228870ae4f26dddd67", "title": "Learning One-Shot Exemplar SVM from the Web for Face Verification", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ce37/e11f4046a4b766b0e3228870ae4f26dddd67.pdf"}, {"id": "ab0d227b63b702ba80f70fd053175cd1b2fd28cc", "title": "Boosting Pseudo Census Transform Features for Face Alignment", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/0eed/cda8981740ae2c34ad5809dbdfcd817f2518.pdf"}, {"id": "4c170a0dcc8de75587dae21ca508dab2f9343974", "title": "FaceTracer: A Search Engine for Large Collections of Images with Faces", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf"}, {"id": "65f0b05052c3145a58c2653821e5429ca62555ce", "title": "Attacks Meet Interpretability: Attribute-steered Detection of Adversarial Samples", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11580.pdf"}, {"id": "edfce091688bc88389dd4877950bd58e00ff1253", "title": "A talking profile to distinguish identical twins", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553700"}, {"id": "1e3068886b138304ec5a7296702879cc8788143d", "title": "Active Rare Class Discovery and Classification Using Dirichlet Processes", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}], "year": "2013", "pdf": "http://doi.org/10.1007/s11263-013-0630-3"}, {"id": "566a39d753c494f57b4464d6bde61bf3593f7ceb", "title": "A Critical Review of Action Recognition Benchmarks", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.43"}, {"id": "16fadde3e68bba301f9829b3f99157191106bd0f", "title": "Utility data annotation with Amazon Mechanical Turk", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2008", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4562953"}, {"id": "bd379f8e08f88729a9214260e05967f4ca66cd65", "title": "Learning Compositional Visual Concepts with Mutual Consistency", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.06148.pdf"}, {"id": "2ff9ffedfc59422a8c7dac418a02d1415eec92f1", "title": "Face 
Verification Using Boosted Cross-Image Features", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}, {"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6e3b/778ad384101f792284b42844518f620143aa.pdf"}, {"id": "e22adcd2a6a7544f017ec875ce8f89d5c59e09c8", "title": "Gender Privacy: An Ensemble of Semi Adversarial Networks for Confounding Arbitrary Gender Classifiers", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11936.pdf"}, {"id": "22e678d3e915218a7c09af0d1602e73080658bb7", "title": "Adventures in archiving and using three years of webcam images", "addresses": [{"address": "Washington University", "lat": "38.64804450", "lng": "-90.30996670", "type": "edu"}], "year": 2009, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/04/13.pdf"}, {"id": "c3c463a9ee464bb610423b7203300a83a166b500", "title": "Transform-invariant dictionary learning for face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025069"}, {"id": "b75eecc879da38138bf3ace9195ae1613fb6e3cc", "title": "Improvement in Detection of Wrong-Patient Errors When Radiologists Include Patient Photographs in Their Interpretation of Portable Chest Radiographs", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1007/s10278-015-9808-2"}, {"id": "2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc", "title": "Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf"}, {"id": "919bdc161485615d5ee571b1585c1eb0539822c8", "title": "A ranking model for face alignment with Pseudo Census Transform", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2012, "pdf": "http://ieeexplore.ieee.org/document/6460332/"}, {"id": "a3a2f3803bf403262b56ce88d130af15e984fff0", "title": "Building a Compact Relevant Sample Coverage for Relevance Feedback in Content-Based Image Retrieval", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/e538/e1f6557d2920b449249606f909b665fbb924.pdf"}, {"id": "a192845a7695bdb372cccf008e6590a14ed82761", "title": "A Novel Local Pattern Descriptor—Local Vector Pattern in High-Order Derivative Space for Face Recognition", "addresses": [{"address": "National Central University", "lat": "24.96841805", "lng": "121.19139696", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2321495"}, {"id": "8db9188e5137e167bffb3ee974732c1fe5f7a7dc", "title": "Tree-Structured Nuclear Norm Approximation With Applications to Robust Face Recognition", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}, {"address": "Nanjing University", "lat": "32.05659570", "lng": 
"118.77408833", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2612885"}, {"id": "21e158bcda4e10da88ee8da3799a6144b60d791f", "title": "Population Matching Discrepancy and Applications in Deep Learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/21e1/58bcda4e10da88ee8da3799a6144b60d791f.pdf"}, {"id": "2b7ef95822a4d577021df16607bf7b4a4514eb4b", "title": "Emergence of Object-Selective Features in Unsupervised Feature Learning", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b596/9178f843bfaecd0026d04c41e79bcb9edab5.pdf"}, {"id": "3fe4109ded039ac9d58eb9f5baa5327af30ad8b6", "title": "Spatio-Temporal GrabCut human segmentation for face and pose recovery", "addresses": [{"address": "University of Barcelona", "lat": "41.38689130", "lng": "2.16352385", "type": "edu"}], "year": 2010, "pdf": "http://www.cvc.uab.cat/~ahernandez/files/CVPR2010STGRABCUT.pdf"}, {"id": "28be652db01273289499bc6e56379ca0237506c0", "title": "FaLRR: A fast low rank representation solver", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_018_ext.pdf"}, {"id": "5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c", "title": "SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks", "addresses": [{"address": "Philipps-Universit\u00e4t Marburg", "lat": "50.81427010", "lng": "8.77143500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICT.2017.7998256"}, {"id": "1a40c2a2d17c52c8b9d20648647d0886e30a60fa", "title": "Hybrid hypergraph construction for facial expression recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900283"}, {"id": "70580ed8bc482cad66e059e838e4a779081d1648", "title": "Gender Classification using Multi-Level Wavelets on Real World Face Images", "addresses": [{"address": "Shaheed Zulfikar Ali Bhutto Institute of Science and Technology", "lat": "24.81865870", "lng": "67.03165850", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/7058/0ed8bc482cad66e059e838e4a779081d1648.pdf"}, {"id": "9fc04a13eef99851136eadff52e98eb9caac919d", "title": "Rethinking the Camera Pipeline for Computer Vision", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9fc0/4a13eef99851136eadff52e98eb9caac919d.pdf"}, {"id": "0ca66283f4fb7dbc682f789fcf6d6732006befd5", "title": "Active Dictionary Learning for Image Representation", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/0ca6/6283f4fb7dbc682f789fcf6d6732006befd5.pdf"}, {"id": "761304bbd259a9e419a2518193e1ff1face9fd2d", "title": "Robust and Computationally Efficient Face Detection Using Gaussian Derivative Features of Higher Orders", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": 
"edu"}], "year": 2012, "pdf": "https://doi.org/10.1007/978-3-642-33885-4_57"}, {"id": "f5af3c28b290dc797c499283e2d0662570f9ed02", "title": "GenLR-Net : Deep framework for very low resolution face and object recognition with generalization to unseen categories", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f5af/3c28b290dc797c499283e2d0662570f9ed02.pdf"}, {"id": "a0d6390dd28d802152f207940c7716fe5fae8760", "title": "Bayesian Face Revisited: A Joint Formulation", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf"}, {"id": "54ba18952fe36c9be9f2ab11faecd43d123b389b", "title": "Triangular similarity metric learning for face verification", "addresses": [{"address": "University of Lyon", "lat": "45.78332440", "lng": "4.87819840", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163085"}, {"id": "a2bd81be79edfa8dcfde79173b0a895682d62329", "title": "Multi-Objective Vehicle Routing Problem Applied to Large Scale Post Office Deliveries", "addresses": [{"address": "University of Campinas", "lat": "-27.59539950", "lng": "-48.61542180", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a2bd/81be79edfa8dcfde79173b0a895682d62329.pdf"}, {"id": "69b2a7533e38c2c8c9a0891a728abb423ad2c7e7", "title": "Manifold based sparse representation for facial understanding in natural images", "addresses": [{"address": "Rochester Institute of Technology", "lat": "43.08250655", "lng": "-77.67121663", "type": "edu"}], "year": 2013, "pdf": "https://doi.org/10.1016/j.imavis.2013.03.003"}, {"id": "0df0d1adea39a5bef318b74faa37de7f3e00b452", "title": "Appearance-based gaze estimation in the wild", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2015, "pdf": "https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf"}, {"id": "633101e794d7b80f55f466fd2941ea24595e10e6", "title": "Face Attribute Prediction with classification CNN", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/6331/01e794d7b80f55f466fd2941ea24595e10e6.pdf"}, {"id": "089b5e8eb549723020b908e8eb19479ba39812f5", "title": "A Cross Benchmark Assessment of a Deep Convolutional Neural Network for Face Recognition", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2017, "pdf": "http://www.face-recognition-challenge.com/RobustnessOfDCNN-preprint.pdf"}, {"id": "f27fd2a1bc229c773238f1912db94991b8bf389a", "title": "How do you develop a face detector for the unconstrained environment?", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IVCNZ.2016.7804414"}, {"id": "e94dfdc5581f6bc0338e21ad555b5f1734f8697e", "title": "Learning to Anonymize Faces for Privacy Preserving Action Detection", "addresses": [{"address": "University of California, Davis", 
"lat": "38.53363490", "lng": "-121.79077264", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11556.pdf"}, {"id": "aa892fe17c06e2b18db2b12314499a741e755df7", "title": "Improved performance of face recognition using CNN with constrained triplet loss layer", "addresses": [{"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/IJCNN.2017.7966089"}, {"id": "672fae3da801b2a0d2bad65afdbbbf1b2320623e", "title": "Pose-Selective Max Pooling for Measuring Similarity", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.07042.pdf"}, {"id": "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "title": "Towards Interpretable Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00611.pdf"}, {"id": "a90e6751ae32cb2983891ef2216293311cd6a8e9", "title": "Clustering using Ensemble Clustering Technique", "addresses": [{"address": "Chongqing University", "lat": "29.50841740", "lng": "106.57858552", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a90e/6751ae32cb2983891ef2216293311cd6a8e9.pdf"}, {"id": "9be696618cfcea90879747a8512f21b10cceac48", "title": "Structural Consistency and Controllability for Diverse Colorization", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.02129.pdf"}, {"id": "d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1", "title": "Face detection and recognition for home service robots with end-to-end deep neural networks", "addresses": [{"address": "Futurewei Technologies Inc., Santa Clara, CA", "lat": "37.37344400", "lng": "-121.96487270", "type": "company"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952553"}, {"id": "d4448f8aa320f04066cc43201d55ddd023eb712e", "title": "Clothing Change Aware Person Identification", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of South Carolina", "lat": "33.99282980", "lng": "-81.02685168", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/d444/8f8aa320f04066cc43201d55ddd023eb712e.pdf"}, {"id": "e1256ff535bf4c024dd62faeb2418d48674ddfa2", "title": "Towards Open-Set Identity Preserving Face Synthesis", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11182.pdf"}, {"id": "10e0e6f1ec00b20bc78a5453a00c792f1334b016", "title": "Temporal Selective Max Pooling Towards Practical Face Recognition", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/672f/ae3da801b2a0d2bad65afdbbbf1b2320623e.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], 
"year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "585260468d023ffc95f0e539c3fa87254c28510b", "title": "Cardea: Context-Aware Visual Privacy Protection from Pervasive Cameras", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5852/60468d023ffc95f0e539c3fa87254c28510b.pdf"}, {"id": "db84c6fd771a073023f2b42e48a68eb2d9d31e4a", "title": "A Deep Variational Autoencoder Approach for Robust Facial Symmetrization", "addresses": [{"address": "Shandong University of Science and Technology", "lat": "36.00146435", "lng": "120.11624057", "type": "edu"}, {"address": "Ocean University of China", "lat": "36.16161795", "lng": "120.49355276", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/db84/c6fd771a073023f2b42e48a68eb2d9d31e4a.pdf"}, {"id": "50b58becaf67e92a6d9633e0eea7d352157377c3", "title": "Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/50b5/8becaf67e92a6d9633e0eea7d352157377c3.pdf"}, {"id": "5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b", "title": "Regularized metric adaptation for unconstrained face verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900278"}, {"id": "86204fc037936754813b91898377e8831396551a", "title": "Dense Face Alignment", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01442.pdf"}, {"id": "24b5ea4e262e22768813e7b6581f60e4ab9a8de7", "title": "Facial Soft Biometrics for Recognition in the Wild: Recent Works, Annotation, and COTS Evaluation", "addresses": [{"address": "Universidad Autonoma de Madrid", "lat": "40.48256135", "lng": "-3.69060790", "type": "edu"}, {"address": "Nokia Bell-Labs, Madrid, Spain", "lat": "40.39059140", "lng": "-74.18638510", "type": "company"}, {"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2018.2807791"}, {"id": "def2983576001bac7d6461d78451159800938112", "title": "The Do\u2019s and Don\u2019ts for CNN-Based Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07426.pdf"}, {"id": "35700f9a635bd3c128ab41718b040a0c28d6361a", "title": "DeepGait: A Learning Deep Convolutional Representation for View-Invariant Gait Recognition Using Joint Bayesian", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}, {"address": "Zhejiang University of Technology", "lat": "30.29315340", "lng": "120.16204580", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3570/0f9a635bd3c128ab41718b040a0c28d6361a.pdf"}, {"id": "1648cf24c042122af2f429641ba9599a2187d605", "title": "Boosting cross-age face verification via generative age normalization", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272698"}, {"id": 
"856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b", "title": "Image-to-Set Face Recognition Using Locality Repulsion Projections and Sparse Reconstruction-Based Similarity Measure", "addresses": [{"address": "Advanced Digital Sciences Center, Singapore", "lat": "1.30372570", "lng": "103.77377630", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014"}, {"id": "3107316f243233d45e3c7e5972517d1ed4991f91", "title": "CVAE-GAN: Fine-Grained Image Generation through Asymmetric Training", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1703.10155"}]}
\ No newline at end of file diff --git a/site/datasets/final/megaface.json b/site/datasets/final/megaface.json index 21045601..293143bd 100644 --- a/site/datasets/final/megaface.json +++ b/site/datasets/final/megaface.json @@ -1 +1 @@ -{"id": "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "paper": {"paper_id": "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "key": "megaface", "title": "The MegaFace Benchmark: 1 Million Faces for Recognition at Scale", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527", "address": {"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, "name": "MegaFace"}, "address": {"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, "additional_papers": [{"paper_id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "key": "megaface", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf", "address": "", "name": "MegaFace"}], "citations": [{"id": "73ea06787925157df519a15ee01cc3dc1982a7e0", "title": "Fast Face Image Synthesis with Minimal Training", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.01474.pdf"}, {"id": "fba95853ca3135cc52a4b2bc67089041c2a9408c", "title": "Disguised Faces in the Wild", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fba9/5853ca3135cc52a4b2bc67089041c2a9408c.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "0334a8862634988cc684dacd4279c5c0d03704da", "title": "FaceNet2ExpNet: Regularizing a Deep Face Recognition Net for Expression Recognition", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1609.06591"}, {"id": "11a47a91471f40af5cf00449954474fd6e9f7694", "title": "NIRFaceNet: A Convolutional Neural Network for Near-Infrared Face Identification", "addresses": [{"address": "Southwest University", "lat": "29.82366295", "lng": "106.42050016", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/11a4/7a91471f40af5cf00449954474fd6e9f7694.pdf"}, {"id": "58d0c140597aa658345230615fb34e2c750d164c", "title": "Continuous Biometric Verification for Non-Repudiation of Remote Services", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3098954.3098969"}, {"id": "c5e37630d0672e4d44f7dee83ac2c1528be41c2e", "title": "Multi-task Deep Neural Network for Joint Face Recognition and Facial Attribute Prediction", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3078973"}, {"id": "ec05078be14a11157ac0e1c6b430ac886124589b", "title": "Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches", "addresses": [{"address": 
"Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Concordia University", "lat": "45.57022705", "lng": "-122.63709346", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf"}, {"id": "7788fa76f1488b1597ee2bebc462f628e659f61e", "title": "A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds", "addresses": [{"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888"}, {"id": "a2d1818eb461564a5153c74028e53856cf0b40fd", "title": "Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition", "addresses": [{"address": "Tencent", "lat": "22.54471540", "lng": "113.93571640", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07599.pdf"}, {"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf"}, {"id": "b18858ad6ec88d8b443dffd3e944e653178bc28b", "title": "Trojaning Attack on Neural Networks", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf"}, {"id": "44e6ce12b857aeade03a6e5d1b7fb81202c39489", "title": "VoxCeleb2: Deep Speaker Recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05622.pdf"}, {"id": "b59cee1f647737ec3296ccb3daa25c890359c307", "title": "Continuously Reproducing Toolchains in Pattern Recognition and Machine Learning Experiments", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b59c/ee1f647737ec3296ccb3daa25c890359c307.pdf"}, {"id": "809ea255d144cff780300440d0f22c96e98abd53", "title": "ArcFace: Additive Angular Margin Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf"}, {"id": "8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7", "title": "Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.02169.pdf"}, {"id": "7cffcb4f24343a924a8317d560202ba9ed26cd0b", "title": "The unconstrained ear recognition challenge", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}, {"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}, {"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06997.pdf"}, {"id": 
"c88ce5ef33d5e544224ab50162d9883ff6429aa3", "title": "Face Match for Family Reunification: Real-World Face Image Retrieval", "addresses": [{"address": "Central Washington University", "lat": "47.00646895", "lng": "-120.53673040", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c88c/e5ef33d5e544224ab50162d9883ff6429aa3.pdf"}, {"id": "eb8519cec0d7a781923f68fdca0891713cb81163", "title": "Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Concordia University", "lat": "45.57022705", "lng": "-122.63709346", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.08617.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "a52a69bf304d49fba6eac6a73c5169834c77042d", "title": "Margin Loss: Making Faces More Separable", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/LSP.2017.2789251"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "831b4d8b0c0173b0bac0e328e844a0fbafae6639", "title": "Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.01407.pdf"}, {"id": "b7b421be7c1dcbb8d41edb11180ba6ec87511976", "title": "A Deep Face Identification Network Enhanced by Facial Attributes Prediction", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00324.pdf"}, {"id": "cbbd13c29d042743f0139f1e044b6bca731886d0", "title": "Not-So-CLEVR: learning same-different relations strains feedforward neural networks.", "addresses": [{"address": "Brown University", "lat": "41.82686820", "lng": "-71.40123146", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cbbd/13c29d042743f0139f1e044b6bca731886d0.pdf"}, {"id": "9865fe20df8fe11717d92b5ea63469f59cf1635a", "title": "Wildest Faces: Face Detection and Recognition in Violent Settings", "addresses": [{"address": "Hacettepe University", "lat": "39.86742125", "lng": "32.73519072", "type": "edu"}, {"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07566.pdf"}, {"id": "3cf1f89d73ca4b25399c237ed3e664a55cd273a2", "title": "Face Sketch Matching via Coupled Deep Transform 
Learning", "addresses": [{"address": "IIIT Delhi, India", "lat": "28.54562820", "lng": "77.27315050", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.02914.pdf"}, {"id": "9b07084c074ba3710fee59ed749c001ae70aa408", "title": "Computational Models of Face Perception.", "addresses": [{"address": "Ohio State University", "lat": "40.00471095", "lng": "-83.02859368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9b07/084c074ba3710fee59ed749c001ae70aa408.pdf"}, {"id": "6d07e176c754ac42773690d4b4919a39df85d7ec", "title": "Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/6d07/e176c754ac42773690d4b4919a39df85d7ec.pdf"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "818ecc8c8d4dc398b01a852df90cb8d972530fa5", "title": "Unsupervised Training for 3D Morphable Model Regression", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06098.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "323f9ae6bdd2a4e4dce4168f7f7e19c70585c9b5", "title": "Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1712.01619.pdf"}, {"id": "d949fadc9b6c5c8b067fa42265ad30945f9caa99", "title": "Rethinking Feature Discrimination and Polymerization for Large-scale Recognition", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.00870.pdf"}, {"id": "4cdb6144d56098b819076a8572a664a2c2d27f72", "title": "Face Synthesis for Eyeglass-Robust Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.01196.pdf"}, {"id": 
"672fae3da801b2a0d2bad65afdbbbf1b2320623e", "title": "Pose-Selective Max Pooling for Measuring Similarity", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.07042.pdf"}, {"id": "0f9fe80fff218573a4805437ba7010fa823ca0e6", "title": "DIY Human Action Data Set Generation", "addresses": [{"address": "Simon Fraser University", "lat": "49.27674540", "lng": "-122.91777375", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11264.pdf"}, {"id": "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "title": "Ring loss: Convex Feature Normalization for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00130.pdf"}, {"id": "6193c833ad25ac27abbde1a31c1cabe56ce1515b", "title": "Trojaning Attack on Neural Networks", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/5f25/7ca18a92c3595db3bda3224927ec494003a5.pdf"}, {"id": "d4f1eb008eb80595bcfdac368e23ae9754e1e745", "title": "Unconstrained Face Detection and Open-Set Face Recognition Challenge", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02337.pdf"}, {"id": "1648cf24c042122af2f429641ba9599a2187d605", "title": "Boosting cross-age face verification via generative age normalization", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272698"}, {"id": "228ea13041910c41b50d0052bdce924037c3bc6a", "title": "A Review Paper Between Open Source and Commercial SDK and Performance Comparisons of Face Matchers", "addresses": [{"address": "National Science and Technology Development Agency, Thailand", "lat": "14.09502500", "lng": "100.66471010", "type": "gov"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434495"}, {"id": "5a259f2f5337435f841d39dada832ab24e7b3325", "title": "Face Recognition via Active Annotation and Learning", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2984059"}, {"id": "746c0205fdf191a737df7af000eaec9409ede73f", "title": "Investigating Nuisances in DCNN-Based Face Recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119"}, {"id": "626913b8fcbbaee8932997d6c4a78fe1ce646127", "title": "Learning from Millions of 3D Scans for Large-scale 3D Face Recognition", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "d80a3d1f3a438e02a6685e66ee908446766fefa9", "title": "Quantifying Facial Age by Posterior of Age Comparisons", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2017", "pdf": 
"https://arxiv.org/pdf/1708.09687.pdf"}, {"id": "270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0", "title": "EmotioNet Challenge: Recognition of facial expressions of emotion in the wild", "addresses": [{"address": "Ohio State University", "lat": "40.00471095", "lng": "-83.02859368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/270e/5266a1f6e76954dedbc2caf6ff61a5fbf8d0.pdf"}, {"id": "0081e2188c8f34fcea3e23c49fb3e17883b33551", "title": "Training Deep Face Recognition Systems with Synthetic Data", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf"}, {"id": "40bb090a4e303f11168dce33ed992f51afe02ff7", "title": "Marginal Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf"}, {"id": "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "title": "SphereFace: Deep Hypersphere Embedding for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.08063.pdf"}, {"id": "c98983592777952d1751103b4d397d3ace00852d", "title": "Face Synthesis from Facial Identity Features", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf"}, {"id": "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf"}, {"id": "def2983576001bac7d6461d78451159800938112", "title": "The Do\u2019s and Don\u2019ts for CNN-Based Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07426.pdf"}, {"id": "df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb", "title": "SREFI: Synthesis of realistic example face images", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.06693.pdf"}, {"id": "5812d8239d691e99d4108396f8c26ec0619767a6", "title": "GhostVLAD for set-based face recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.09951.pdf"}, {"id": "368d59cf1733af511ed8abbcbeb4fb47afd4da1c", "title": "To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2016, "pdf": 
"http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf"}, {"id": "3ac3a714042d3ebc159546c26321a1f8f4f5f80c", "title": "Clustering lightened deep representation for large scale face identification", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3025149"}, {"id": "b55e70df03d9b80c91446a97957bc95772dcc45b", "title": "MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis", "addresses": [{"address": "Brno University of Technology", "lat": "49.20172000", "lng": "16.60331680", "type": "edu"}, {"address": "University of Passau", "lat": "48.56704660", "lng": "13.45178350", "type": "edu"}, {"address": "Deutsche Welle, Bonn, Germany", "lat": "50.71714970", "lng": "7.12825184", "type": "edu"}, {"address": "Expert Systems, Modena, Italy", "lat": "44.65316920", "lng": "10.85862280", "type": "company"}, {"address": "National University of Ireland Galway", "lat": "53.27639715", "lng": "-9.05829961", "type": "edu"}, {"address": "Paradigma Digital, Madrid, Spain", "lat": "40.44029950", "lng": "-3.78700760", "type": "company"}, {"address": "Siren Solutions, Dublin, Ireland", "lat": "53.34980530", "lng": "-6.26030970", "type": "company"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329"}, {"id": "8efda5708bbcf658d4f567e3866e3549fe045bbb", "title": "Pre-trained Deep Convolutional Neural Networks for Face Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf"}, {"id": "e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7", "title": "Toward End-to-End Face Recognition Through Alignment Learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1701.07174.pdf"}, {"id": "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "title": "Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions", "addresses": [{"address": "University of Copenhagen", "lat": "55.68015020", "lng": "12.57232700", "type": "edu"}, {"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf"}, {"id": "d7cbedbee06293e78661335c7dd9059c70143a28", "title": "MobileFaceNets: Efficient CNNs for Accurate Real-time Face Verification on Mobile Devices", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.07573.pdf"}, {"id": "93af36da08bf99e68c9b0d36e141ed8154455ac2", "title": "A Dditive M Argin S Oftmax for F Ace V Erification", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf"}, {"id": "582edc19f2b1ab2ac6883426f147196c8306685a", "title": "Do We Really Need to Collect Millions of Faces for Effective Face Recognition?", "addresses": [{"address": "Open University of 
Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf"}, {"id": "2359c3f763e96e0ee62b1119c897a32ce9715a77", "title": "Neural Computing on a Raspberry Pi : Applications to Zebrafish Behavior Monitoring", "addresses": [{"address": "Brown University", "lat": "41.82686820", "lng": "-71.40123146", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2359/c3f763e96e0ee62b1119c897a32ce9715a77.pdf"}, {"id": "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "title": "Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource Constraints", "addresses": [{"address": "Indiana University", "lat": "39.86948105", "lng": "-84.87956905", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09900.pdf"}, {"id": "4d46fd59364ed5ec8f50abe68cd7886379bfd80a", "title": "Learning from Millions of 3D Scans for Large-Scale 3D Face Recognition", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "9f65319b8a33c8ec11da2f034731d928bf92e29d", "title": "Taking Roll: a Pipeline for Face Recognition", "addresses": [{"address": "Louisiana State University", "lat": "30.40550035", "lng": "-91.18620474", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf"}, {"id": "9e31e77f9543ab42474ba4e9330676e18c242e72", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "841855205818d3a6d6f85ec17a22515f4f062882", "title": "Low Resolution Face Recognition in the Wild", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11529.pdf"}, {"id": "e64c166dc5bb33bc61462a8b5ac92edb24d905a1", "title": "Fast Face Image Synthesis with Minimal Training.", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.01474.pdf"}, {"id": "3c5ba48d25fbe24691ed060fa8f2099cc9eba14f", "title": "Racial Faces in-the-Wild: Reducing Racial Bias by Deep Unsupervised Domain Adaptation", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1812.00194.pdf"}, {"id": "7a7fddb3020e0c2dd4e3fe275329eb10f1cfbb8a", "title": "Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition", "addresses": [{"address": "Tencent", "lat": "22.54471540", "lng": "113.93571640", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07599.pdf"}, {"id": "a32878e85941b5392d58d28e5248f94e16e25d78", "title": "Quality Classified Image Analysis with Application to Face Detection and Recognition", "addresses": [{"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.06445.pdf"}]}
\ No newline at end of file +{"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "paper": {"paper_id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "key": "megaface", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf", "address": "", "name": "MegaFace"}, "address": {"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, "additional_papers": [{"paper_id": "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "key": "megaface", "title": "The MegaFace Benchmark: 1 Million Faces for Recognition at Scale", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527", "address": {"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, "name": "MegaFace"}], "citations": [{"id": "b59cee1f647737ec3296ccb3daa25c890359c307", "title": "Continuously Reproducing Toolchains in Pattern Recognition and Machine Learning Experiments", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b59c/ee1f647737ec3296ccb3daa25c890359c307.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "title": "Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource Constraints", "addresses": [{"address": "Indiana University", "lat": "39.86948105", "lng": "-84.87956905", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09900.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "4d46fd59364ed5ec8f50abe68cd7886379bfd80a", "title": "Learning from Millions of 3D Scans for Large-Scale 3D Face Recognition", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "9f65319b8a33c8ec11da2f034731d928bf92e29d", "title": "Taking Roll: a Pipeline for Face Recognition", "addresses": [{"address": "Louisiana State University", "lat": "30.40550035", "lng": "-91.18620474", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf"}, {"id": "9e31e77f9543ab42474ba4e9330676e18c242e72", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "841855205818d3a6d6f85ec17a22515f4f062882", "title": "Low Resolution Face Recognition in the Wild", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": 
"-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11529.pdf"}, {"id": "8efda5708bbcf658d4f567e3866e3549fe045bbb", "title": "Pre-trained Deep Convolutional Neural Networks for Face Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf"}, {"id": "9865fe20df8fe11717d92b5ea63469f59cf1635a", "title": "Wildest Faces: Face Detection and Recognition in Violent Settings", "addresses": [{"address": "Hacettepe University", "lat": "39.86742125", "lng": "32.73519072", "type": "edu"}, {"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07566.pdf"}, {"id": "e64c166dc5bb33bc61462a8b5ac92edb24d905a1", "title": "Fast Face Image Synthesis with Minimal Training.", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.01474.pdf"}, {"id": "3c5ba48d25fbe24691ed060fa8f2099cc9eba14f", "title": "Racial Faces in-the-Wild: Reducing Racial Bias by Deep Unsupervised Domain Adaptation", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1812.00194.pdf"}, {"id": "7a7fddb3020e0c2dd4e3fe275329eb10f1cfbb8a", "title": "Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition", "addresses": [{"address": "Tencent", "lat": "22.54471540", "lng": "113.93571640", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07599.pdf"}, {"id": "a32878e85941b5392d58d28e5248f94e16e25d78", "title": "Quality Classified Image Analysis with Application to Face Detection and Recognition", "addresses": [{"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.06445.pdf"}, {"id": "73ea06787925157df519a15ee01cc3dc1982a7e0", "title": "Fast Face Image Synthesis with Minimal Training", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.01474.pdf"}, {"id": "fba95853ca3135cc52a4b2bc67089041c2a9408c", "title": "Disguised Faces in the Wild", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fba9/5853ca3135cc52a4b2bc67089041c2a9408c.pdf"}, {"id": "0334a8862634988cc684dacd4279c5c0d03704da", "title": "FaceNet2ExpNet: Regularizing a Deep Face Recognition Net for Expression Recognition", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1609.06591"}, {"id": "11a47a91471f40af5cf00449954474fd6e9f7694", "title": "NIRFaceNet: A Convolutional Neural Network for Near-Infrared Face Identification", "addresses": [{"address": "Southwest University", "lat": "29.82366295", "lng": "106.42050016", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/11a4/7a91471f40af5cf00449954474fd6e9f7694.pdf"}, {"id": 
"58d0c140597aa658345230615fb34e2c750d164c", "title": "Continuous Biometric Verification for Non-Repudiation of Remote Services", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3098954.3098969"}, {"id": "c5e37630d0672e4d44f7dee83ac2c1528be41c2e", "title": "Multi-task Deep Neural Network for Joint Face Recognition and Facial Attribute Prediction", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3078973"}, {"id": "ec05078be14a11157ac0e1c6b430ac886124589b", "title": "Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Concordia University", "lat": "45.57022705", "lng": "-122.63709346", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf"}, {"id": "7788fa76f1488b1597ee2bebc462f628e659f61e", "title": "A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds", "addresses": [{"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888"}, {"id": "a2d1818eb461564a5153c74028e53856cf0b40fd", "title": "Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition", "addresses": [{"address": "Tencent", "lat": "22.54471540", "lng": "113.93571640", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07599.pdf"}, {"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf"}, {"id": "b18858ad6ec88d8b443dffd3e944e653178bc28b", "title": "Trojaning Attack on Neural Networks", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf"}, {"id": "44e6ce12b857aeade03a6e5d1b7fb81202c39489", "title": "VoxCeleb2: Deep Speaker Recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05622.pdf"}, {"id": "809ea255d144cff780300440d0f22c96e98abd53", "title": "ArcFace: Additive Angular Margin Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf"}, {"id": "8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7", "title": "Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.02169.pdf"}, {"id": "7cffcb4f24343a924a8317d560202ba9ed26cd0b", "title": "The unconstrained ear recognition challenge", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", 
"type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}, {"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}, {"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06997.pdf"}, {"id": "c88ce5ef33d5e544224ab50162d9883ff6429aa3", "title": "Face Match for Family Reunification: Real-World Face Image Retrieval", "addresses": [{"address": "Central Washington University", "lat": "47.00646895", "lng": "-120.53673040", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c88c/e5ef33d5e544224ab50162d9883ff6429aa3.pdf"}, {"id": "eb8519cec0d7a781923f68fdca0891713cb81163", "title": "Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Concordia University", "lat": "45.57022705", "lng": "-122.63709346", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.08617.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "a52a69bf304d49fba6eac6a73c5169834c77042d", "title": "Margin Loss: Making Faces More Separable", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/LSP.2017.2789251"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "831b4d8b0c0173b0bac0e328e844a0fbafae6639", "title": "Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.01407.pdf"}, {"id": "b7b421be7c1dcbb8d41edb11180ba6ec87511976", "title": "A Deep Face Identification Network Enhanced by Facial Attributes Prediction", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00324.pdf"}, {"id": "cbbd13c29d042743f0139f1e044b6bca731886d0", "title": "Not-So-CLEVR: learning same-different relations strains feedforward neural networks.", "addresses": [{"address": "Brown University", "lat": "41.82686820", "lng": "-71.40123146", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cbbd/13c29d042743f0139f1e044b6bca731886d0.pdf"}, {"id": "3cf1f89d73ca4b25399c237ed3e664a55cd273a2", "title": "Face Sketch Matching via Coupled Deep Transform Learning", 
"addresses": [{"address": "IIIT Delhi, India", "lat": "28.54562820", "lng": "77.27315050", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.02914.pdf"}, {"id": "9b07084c074ba3710fee59ed749c001ae70aa408", "title": "Computational Models of Face Perception.", "addresses": [{"address": "Ohio State University", "lat": "40.00471095", "lng": "-83.02859368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9b07/084c074ba3710fee59ed749c001ae70aa408.pdf"}, {"id": "6d07e176c754ac42773690d4b4919a39df85d7ec", "title": "Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/6d07/e176c754ac42773690d4b4919a39df85d7ec.pdf"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "818ecc8c8d4dc398b01a852df90cb8d972530fa5", "title": "Unsupervised Training for 3D Morphable Model Regression", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06098.pdf"}, {"id": "323f9ae6bdd2a4e4dce4168f7f7e19c70585c9b5", "title": "Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1712.01619.pdf"}, {"id": "d949fadc9b6c5c8b067fa42265ad30945f9caa99", "title": "Rethinking Feature Discrimination and Polymerization for Large-scale Recognition", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.00870.pdf"}, {"id": "4cdb6144d56098b819076a8572a664a2c2d27f72", "title": "Face Synthesis for Eyeglass-Robust Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.01196.pdf"}, {"id": "672fae3da801b2a0d2bad65afdbbbf1b2320623e", "title": "Pose-Selective Max Pooling for Measuring Similarity", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.07042.pdf"}, {"id": "0f9fe80fff218573a4805437ba7010fa823ca0e6", "title": "DIY Human Action Data Set Generation", "addresses": [{"address": "Simon Fraser University", "lat": "49.27674540", "lng": 
"-122.91777375", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11264.pdf"}, {"id": "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "title": "Ring loss: Convex Feature Normalization for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00130.pdf"}, {"id": "6193c833ad25ac27abbde1a31c1cabe56ce1515b", "title": "Trojaning Attack on Neural Networks", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/5f25/7ca18a92c3595db3bda3224927ec494003a5.pdf"}, {"id": "d4f1eb008eb80595bcfdac368e23ae9754e1e745", "title": "Unconstrained Face Detection and Open-Set Face Recognition Challenge", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02337.pdf"}, {"id": "1648cf24c042122af2f429641ba9599a2187d605", "title": "Boosting cross-age face verification via generative age normalization", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272698"}, {"id": "228ea13041910c41b50d0052bdce924037c3bc6a", "title": "A Review Paper Between Open Source and Commercial SDK and Performance Comparisons of Face Matchers", "addresses": [{"address": "National Science and Technology Development Agency, Thailand", "lat": "14.09502500", "lng": "100.66471010", "type": "gov"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434495"}, {"id": "5a259f2f5337435f841d39dada832ab24e7b3325", "title": "Face Recognition via Active Annotation and Learning", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2984059"}, {"id": "746c0205fdf191a737df7af000eaec9409ede73f", "title": "Investigating Nuisances in DCNN-Based Face Recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119"}, {"id": "626913b8fcbbaee8932997d6c4a78fe1ce646127", "title": "Learning from Millions of 3D Scans for Large-scale 3D Face Recognition", "addresses": [{"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "d80a3d1f3a438e02a6685e66ee908446766fefa9", "title": "Quantifying Facial Age by Posterior of Age Comparisons", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.09687.pdf"}, {"id": "270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0", "title": "EmotioNet Challenge: Recognition of facial expressions of emotion in the wild", "addresses": [{"address": "Ohio State University", "lat": "40.00471095", "lng": "-83.02859368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/270e/5266a1f6e76954dedbc2caf6ff61a5fbf8d0.pdf"}, {"id": "0081e2188c8f34fcea3e23c49fb3e17883b33551", "title": "Training Deep Face Recognition Systems 
with Synthetic Data", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf"}, {"id": "40bb090a4e303f11168dce33ed992f51afe02ff7", "title": "Marginal Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf"}, {"id": "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "title": "SphereFace: Deep Hypersphere Embedding for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.08063.pdf"}, {"id": "c98983592777952d1751103b4d397d3ace00852d", "title": "Face Synthesis from Facial Identity Features", "addresses": [{"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf"}, {"id": "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf"}, {"id": "def2983576001bac7d6461d78451159800938112", "title": "The Do\u2019s and Don\u2019ts for CNN-Based Face Verification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07426.pdf"}, {"id": "df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb", "title": "SREFI: Synthesis of realistic example face images", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.06693.pdf"}, {"id": "5812d8239d691e99d4108396f8c26ec0619767a6", "title": "GhostVLAD for set-based face recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.09951.pdf"}, {"id": "368d59cf1733af511ed8abbcbeb4fb47afd4da1c", "title": "To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}, {"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf"}, {"id": "3ac3a714042d3ebc159546c26321a1f8f4f5f80c", "title": "Clustering lightened deep representation for large scale face identification", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3025149"}, {"id": "b55e70df03d9b80c91446a97957bc95772dcc45b", "title": 
"MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis", "addresses": [{"address": "Brno University of Technology", "lat": "49.20172000", "lng": "16.60331680", "type": "edu"}, {"address": "University of Passau", "lat": "48.56704660", "lng": "13.45178350", "type": "edu"}, {"address": "Deutsche Welle, Bonn, Germany", "lat": "50.71714970", "lng": "7.12825184", "type": "edu"}, {"address": "Expert Systems, Modena, Italy", "lat": "44.65316920", "lng": "10.85862280", "type": "company"}, {"address": "National University of Ireland Galway", "lat": "53.27639715", "lng": "-9.05829961", "type": "edu"}, {"address": "Paradigma Digital, Madrid, Spain", "lat": "40.44029950", "lng": "-3.78700760", "type": "company"}, {"address": "Siren Solutions, Dublin, Ireland", "lat": "53.34980530", "lng": "-6.26030970", "type": "company"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329"}, {"id": "e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7", "title": "Toward End-to-End Face Recognition Through Alignment Learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1701.07174.pdf"}, {"id": "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "title": "Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions", "addresses": [{"address": "University of Copenhagen", "lat": "55.68015020", "lng": "12.57232700", "type": "edu"}, {"address": "University of Malta", "lat": "35.90232260", "lng": "14.48341890", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf"}, {"id": "d7cbedbee06293e78661335c7dd9059c70143a28", "title": "MobileFaceNets: Efficient CNNs for Accurate Real-time Face Verification on Mobile Devices", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.07573.pdf"}, {"id": "93af36da08bf99e68c9b0d36e141ed8154455ac2", "title": "A Dditive M Argin S Oftmax for F Ace V Erification", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf"}, {"id": "582edc19f2b1ab2ac6883426f147196c8306685a", "title": "Do We Really Need to Collect Millions of Faces for Effective Face Recognition?", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf"}, {"id": "2359c3f763e96e0ee62b1119c897a32ce9715a77", "title": "Neural Computing on a Raspberry Pi : Applications to Zebrafish Behavior Monitoring", "addresses": [{"address": "Brown University", "lat": "41.82686820", "lng": "-71.40123146", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2359/c3f763e96e0ee62b1119c897a32ce9715a77.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/final/morph.csv b/site/datasets/final/morph.csv new file mode 100644 index 00000000..cf7ad22b --- /dev/null +++ b/site/datasets/final/morph.csv @@ -0,0 +1,286 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,MORPH Commercial,morph,0.0,0.0,,,9055b155cbabdce3b98e16e5ac9c0edf00f9552f,main,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78,MORPH: a longitudinal image database of normal adult age-progression,2006 +1,MORPH Commercial,morph,34.80809035,135.45785218,Osaka University,edu,dad6b36fd515bda801f3d22a462cc62348f6aad8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117531,Gait-based age estimation using a whole-generation gait database,2011 +2,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,citation,https://doi.org/10.1109/ICDSP.2016.7868598,Deep learning-based learning to rank with ties for image re-ranking,2016 +3,MORPH Commercial,morph,39.1118774,117.3497451,Civil Aviation University of China,edu,ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,citation,https://doi.org/10.1109/ICDSP.2016.7868598,Deep learning-based learning to rank with ties for image re-ranking,2016 +4,MORPH Commercial,morph,25.0410728,121.6147562,Institute of Information Science,edu,4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,citation,https://doi.org/10.1109/TIP.2014.2387379,A Learning Framework for Age Rank Estimation Based on Face Images With Scattering Transform,2015 +5,MORPH Commercial,morph,51.0267513,-1.3972576,"IBM Hursley Labs, UK",company,7123e510dea783035b02f6c35e35a1a09677c5ab,citation,https://doi.org/10.1109/ICPR.2016.7900297,Back to the future: A fully automatic method for robust age progression,2016 +6,MORPH Commercial,morph,35.9042272,-78.85565763,"IBM Research, North Carolina",company,7123e510dea783035b02f6c35e35a1a09677c5ab,citation,https://doi.org/10.1109/ICPR.2016.7900297,Back to the future: A fully automatic method for robust age progression,2016 +7,MORPH Commercial,morph,51.49887085,-0.17560797,Imperial College London,edu,7123e510dea783035b02f6c35e35a1a09677c5ab,citation,https://doi.org/10.1109/ICPR.2016.7900297,Back to the future: A fully automatic method for robust age progression,2016 +8,MORPH Commercial,morph,35.5167538,139.48342251,Tokyo Institute of Technology,edu,3083d2c6d4f456e01cbb72930dc2207af98a6244,citation,http://pdfs.semanticscholar.org/3083/d2c6d4f456e01cbb72930dc2207af98a6244.pdf,Perceived Age Estimation from Face Images,2011 +9,MORPH Commercial,morph,41.3868913,2.16352385,University of Barcelona,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +10,MORPH Commercial,morph,45.4312742,12.3265377,University of Venezia,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +11,MORPH Commercial,morph,41.10427915,29.02231159,Istanbul Technical University,edu,fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,citation,https://arxiv.org/pdf/1606.02909.pdf,Apparent Age Estimation Using Ensemble of Deep Learning Models,2016 +12,MORPH Commercial,morph,40.6341322,-8.6599726,"University of Beira Interior, 
Portugal",edu,81c21f4aafab39b7f5965829ec9e0f828d6a6182,citation,https://doi.org/10.1109/BTAS.2015.7358744,Acquiring high-resolution face images in outdoor environments: A master-slave calibration algorithm,2015 +13,MORPH Commercial,morph,42.36782045,-71.12666653,Harvard University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +14,MORPH Commercial,morph,40.9153196,-73.1270626,Stony Brook University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +15,MORPH Commercial,morph,40.47913175,-74.43168868,Rutgers University,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +16,MORPH Commercial,morph,39.2899685,-76.62196103,University of Maryland,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +17,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,af12a79892bd030c19dfea392f7a7ccb0e7ebb72,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247972,A study on human age estimation under facial expression changes,2012 +18,MORPH Commercial,morph,23.09461185,113.28788994,Sun Yat-Sen University,edu,2d7c2c015053fff5300515a7addcd74b523f3f66,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8323422,Age-Related Factor Guided Joint Task Modeling Convolutional Neural Network for Cross-Age Face Recognition,2018 +19,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,b234d429c9ea682e54fca52f4b889b3170f65ffc,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22,A Concatenational Graph Evolution Aging Model,2012 +20,MORPH Commercial,morph,39.9922379,116.30393816,Peking University,edu,b234d429c9ea682e54fca52f4b889b3170f65ffc,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22,A Concatenational Graph Evolution Aging Model,2012 +21,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,b234d429c9ea682e54fca52f4b889b3170f65ffc,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22,A Concatenational Graph Evolution Aging Model,2012 +22,MORPH Commercial,morph,30.19331415,120.11930822,Zhejiang University,edu,ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,citation,https://doi.org/10.1109/LSP.2016.2602538,Structure-Aware Slow Feature Analysis for Age Estimation,2016 +23,MORPH Commercial,morph,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,citation,https://doi.org/10.1109/LSP.2016.2602538,Structure-Aware Slow Feature Analysis for Age Estimation,2016 +24,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,288964068cd87d97a98b8bc927d6e0d2349458a2,citation,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,Mean-Variance Loss for Deep Age Estimation from a Face,0 +25,MORPH Commercial,morph,39.9082804,116.2458527,University of Chinese Academy of 
Sciences,edu,288964068cd87d97a98b8bc927d6e0d2349458a2,citation,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,Mean-Variance Loss for Deep Age Estimation from a Face,0 +26,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,cd63759842a56bd2ede3999f6e11a74ccbec318b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995404,Simultaneous dimensionality reduction and human age estimation via kernel partial least squares regression,2011 +27,MORPH Commercial,morph,28.5456282,77.2731505,"IIIT Delhi, India",edu,ffc81ced9ee8223ab0adb18817321cbee99606e6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157,A multibiometrics-based CAPTCHA for improved online security,2016 +28,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,ffc81ced9ee8223ab0adb18817321cbee99606e6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157,A multibiometrics-based CAPTCHA for improved online security,2016 +29,MORPH Commercial,morph,41.25713055,-72.9896696,Yale University,edu,df7312cbabb7d75d915ba0d91dea77100ded5c56,citation,https://arxiv.org/pdf/1811.06446.pdf,Preliminary Studies on a Large Face Database,2018 +30,MORPH Commercial,morph,29.6328784,-82.3490133,University of Florida,edu,df7312cbabb7d75d915ba0d91dea77100ded5c56,citation,https://arxiv.org/pdf/1811.06446.pdf,Preliminary Studies on a Large Face Database,2018 +31,MORPH Commercial,morph,31.83907195,117.26420748,University of Science and Technology of China,edu,56c700693b63e3da3b985777da6d9256e2e0dc21,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_079.pdf,Global refinement of random forest,2015 +32,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,1e344b99583b782e3eaf152cdfa15f217b781181,citation,http://doi.acm.org/10.1145/2499788.2499789,A new biologically inspired active appearance model for face age estimation by using local ordinal ranking,2013 +33,MORPH Commercial,morph,39.94976005,116.33629046,Beijing Jiaotong University,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +34,MORPH Commercial,morph,43.1576969,-77.58829158,University of Rochester,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +35,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +36,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,bd8b7599acf53e3053aa27cfd522764e28474e57,citation,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf,Learning long term face aging patterns from partially dense aging databases,2009 +37,MORPH Commercial,morph,39.9922379,116.30393816,Peking University,edu,bd8b7599acf53e3053aa27cfd522764e28474e57,citation,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf,Learning long term face aging patterns from partially dense aging databases,2009 
+38,MORPH Commercial,morph,43.614386,7.071125,EURECOM,edu,70569810e46f476515fce80a602a210f8d9a2b95,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.105,Apparent Age Estimation from Face Images Combining General and Children-Specialized Deep Learning Models,2016 +39,MORPH Commercial,morph,39.9213097,32.7988233,"TOBB Economy and Technology University, Ankara, Turkey",edu,cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,citation,https://doi.org/10.1109/SIU.2016.7495874,Analysis of the effect of image resolution on automatic face gender and age classification,2016 +40,MORPH Commercial,morph,-33.91758275,151.23124025,University of New South Wales,edu,29631ca6cff21c9199c70bcdbbcd5f812d331a96,citation,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf,Error Rates in Users of Automatic Face Recognition Software,2015 +41,MORPH Commercial,morph,-33.88890695,151.18943366,University of Sydney,edu,29631ca6cff21c9199c70bcdbbcd5f812d331a96,citation,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf,Error Rates in Users of Automatic Face Recognition Software,2015 +42,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,1a53ca294bbe5923c46a339955e8207907e9c8c6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870,What Else Does Your Biometric Data Reveal? A Survey on Soft Biometrics,2016 +43,MORPH Commercial,morph,43.614386,7.071125,EURECOM,edu,1a53ca294bbe5923c46a339955e8207907e9c8c6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870,What Else Does Your Biometric Data Reveal? A Survey on Soft Biometrics,2016 +44,MORPH Commercial,morph,40.4319722,-86.92389368,Purdue University,edu,c7c53d75f6e963b403057d8ba5952e4974a779ad,citation,https://pdfs.semanticscholar.org/c7c5/3d75f6e963b403057d8ba5952e4974a779ad.pdf,Aging effects in automated face recognition,2018 +45,MORPH Commercial,morph,41.02451875,28.97697953,Bahçeşehir University,edu,0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,citation,https://doi.org/10.1109/CIBIM.2014.7015437,Biometric template update under facial aging,2014 +46,MORPH Commercial,morph,53.22853665,-0.54873472,University of Lincoln,edu,0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,citation,https://doi.org/10.1109/CIBIM.2014.7015437,Biometric template update under facial aging,2014 +47,MORPH Commercial,morph,46.0810723,13.2119474,University of Udine,edu,0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,citation,https://doi.org/10.1109/CIBIM.2014.7015437,Biometric template update under facial aging,2014 +48,MORPH Commercial,morph,25.0410728,121.6147562,Institute of Information Science,edu,1c17450c4d616e1e1eece248c42eba4f87de9e0d,citation,http://pdfs.semanticscholar.org/d269/39a00a8d3964de612cd3faa86764343d5622.pdf,Automatic Age Estimation from Face Images via Deep Ranking,2015 +49,MORPH Commercial,morph,43.47061295,-80.54724732,University of Waterloo,edu,f2902f5956d7e2dca536d9131d4334f85f52f783,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460191,Facial age estimation using Clustered Multi-task Support Vector Regression Machine,2012 +50,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,ba2bbef34f05551291410103e3de9e82fdf9dddd,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Guo_A_Study_on_2014_CVPR_paper.pdf,A Study on Cross-Population Age Estimation,2014 +51,MORPH Commercial,morph,31.32235655,121.38400941,Shanghai 
University,edu,d454ad60b061c1a1450810a0f335fafbfeceeccc,citation,https://arxiv.org/pdf/1712.07195.pdf,Deep Regression Forests for Age Estimation,2017 +52,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,ad2cb5c255e555d9767d526721a4c7053fa2ac58,citation,https://arxiv.org/pdf/1711.03990.pdf,Longitudinal Study of Child Face Recognition,2018 +53,MORPH Commercial,morph,39.95472495,-75.15346905,Temple University,edu,0cf2eecf20cfbcb7f153713479e3206670ea0e9c,citation,https://arxiv.org/pdf/1806.08906.pdf,Privacy-Protective-GAN for Face De-identification,2018 +54,MORPH Commercial,morph,31.32235655,121.38400941,Shanghai University,edu,c0b02be66a5a1907e8cfb8117de50f80b90a65a8,citation,http://doi.acm.org/10.1145/2808492.2808523,Manifold learning in sparse selected feature subspaces,2015 +55,MORPH Commercial,morph,47.6423318,-122.1369302,Microsoft,company,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +56,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +57,MORPH Commercial,morph,31.846918,117.29053367,Hefei University of Technology,edu,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +58,MORPH Commercial,morph,1.3484104,103.68297965,Nanyang Technological University,edu,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +59,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,2149d49c84a83848d6051867290d9c8bfcef0edb,citation,https://doi.org/10.1109/TIFS.2017.2746062,Label-Sensitive Deep Metric Learning for Facial Age Estimation,2018 +60,MORPH Commercial,morph,25.0410728,121.6147562,Institute of Information Science,edu,c44c84540db1c38ace232ef34b03bda1c81ba039,citation,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf,Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval,2014 +61,MORPH Commercial,morph,25.01682835,121.53846924,National Taiwan University,edu,c44c84540db1c38ace232ef34b03bda1c81ba039,citation,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf,Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval,2014 +62,MORPH Commercial,morph,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +63,MORPH Commercial,morph,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +64,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,2f2406551c693d616a840719ae1e6ea448e2f5d3,citation,http://biometrics.cse.msu.edu/Presentations/CharlesOtto_ICB13_AgeEstimationFaceImages_HumanVsMachinePerformance.pdf,Age estimation from face images: Human vs. 
machine performance,2013 +65,MORPH Commercial,morph,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,citation,https://doi.org/10.1109/TMM.2011.2167317,Web Image and Video Mining Towards Universal and Robust Age Estimator,2011 +66,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,citation,https://doi.org/10.1109/TMM.2011.2167317,Web Image and Video Mining Towards Universal and Robust Age Estimator,2011 +67,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,b446bcd7fb78adfe346cf7a01a38e4f43760f363,citation,http://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf,To appear in ICB 2018 Longitudinal Study of Child Face Recognition,2017 +68,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,c035c193eed5d72c7f187f0bc880a17d217dada0,citation,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf,"Local Gradient Gabor Pattern (LGGP) with Applications in Face Recognition, Cross-spectral Matching and Soft Biometrics",2013 +69,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,c035c193eed5d72c7f187f0bc880a17d217dada0,citation,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf,"Local Gradient Gabor Pattern (LGGP) with Applications in Face Recognition, Cross-spectral Matching and Soft Biometrics",2013 +70,MORPH Commercial,morph,34.66869155,-82.83743476,Clemson University,edu,c907104680ad53bdc673f2648d713e4d26335825,citation,http://doi.acm.org/10.1145/3077286.3077304,Dataset and Metrics for Adult Age-Progression Evaluation,2017 +71,MORPH Commercial,morph,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,c907104680ad53bdc673f2648d713e4d26335825,citation,http://doi.acm.org/10.1145/3077286.3077304,Dataset and Metrics for Adult Age-Progression Evaluation,2017 +72,MORPH Commercial,morph,37.5600406,126.9369248,Yonsei University,edu,fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f,citation,https://arxiv.org/pdf/1809.01990.pdf,Multi-Expert Gender Classification on Age Group by Integrating Deep Neural Networks,2018 +73,MORPH Commercial,morph,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,31a36014354ee7c89aa6d94e656db77922b180a5,citation,http://doi.acm.org/10.1145/2304496.2304509,An interactive tool for extremely dense landmarking of faces,2012 +74,MORPH Commercial,morph,37.5901411,127.0362318,Korea University,edu,4b519e2e88ccd45718b0fc65bfd82ebe103902f7,citation,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,A Discriminative Model for Age Invariant Face Recognition,2011 +75,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,4b519e2e88ccd45718b0fc65bfd82ebe103902f7,citation,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,A Discriminative Model for Age Invariant Face Recognition,2011 +76,MORPH Commercial,morph,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4b519e2e88ccd45718b0fc65bfd82ebe103902f7,citation,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,A Discriminative Model for Age Invariant Face Recognition,2011 +77,MORPH Commercial,morph,23.09461185,113.28788994,Sun Yat-Sen University,edu,23edcd0d2011d9c0d421193af061f2eb3e155da3,citation,http://doi.org/10.1007/s00371-015-1137-4,Facial age 
estimation by using stacked feature composition and selection,2015 +78,MORPH Commercial,morph,23.04436505,113.36668458,Guangzhou University,edu,23edcd0d2011d9c0d421193af061f2eb3e155da3,citation,http://doi.org/10.1007/s00371-015-1137-4,Facial age estimation by using stacked feature composition and selection,2015 +79,MORPH Commercial,morph,38.9530519,-77.3354508,"Cernium Corporation, Reston, VA, USA",company,604a281100784b4d5bc1a6db993d423abc5dc8f0,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,Face Verification Across Age Progression Using Discriminative Methods,2010 +80,MORPH Commercial,morph,39.2899685,-76.62196103,University of Maryland,edu,604a281100784b4d5bc1a6db993d423abc5dc8f0,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,Face Verification Across Age Progression Using Discriminative Methods,2010 +81,MORPH Commercial,morph,39.95472495,-75.15346905,Temple University,edu,604a281100784b4d5bc1a6db993d423abc5dc8f0,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,Face Verification Across Age Progression Using Discriminative Methods,2010 +82,MORPH Commercial,morph,51.2975344,1.07296165,University of Kent,edu,6486b36c6f7fd7675257d26e896223a02a1881d9,citation,https://doi.org/10.1109/THMS.2014.2376874,Selective Review and Analysis of Aging Effects in Biometric System Implementation,2015 +83,MORPH Commercial,morph,22.42031295,114.20788644,Chinese University of Hong Kong,edu,16bce9f940bb01aa5ec961892cc021d4664eb9e4,citation,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf,Mutual Component Analysis for Heterogeneous Face Recognition,2016 +84,MORPH Commercial,morph,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,16bce9f940bb01aa5ec961892cc021d4664eb9e4,citation,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf,Mutual Component Analysis for Heterogeneous Face Recognition,2016 +85,MORPH Commercial,morph,34.67567405,33.04577648,Cyprus University of Technology,edu,9d3aa3b7d392fad596b067b13b9e42443bbc377c,citation,http://pdfs.semanticscholar.org/9d3a/a3b7d392fad596b067b13b9e42443bbc377c.pdf,Facial Biometric Templates and Aging: Problems and Challenges for Artificial Intelligence,2009 +86,MORPH Commercial,morph,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,217a21d60bb777d15cd9328970cab563d70b5d23,citation,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf,Hidden Factor Analysis for Age Invariant Face Recognition,2013 +87,MORPH Commercial,morph,22.42031295,114.20788644,Chinese University of Hong Kong,edu,217a21d60bb777d15cd9328970cab563d70b5d23,citation,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf,Hidden Factor Analysis for Age Invariant Face Recognition,2013 +88,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing University,edu,b1bb517bd87a1212174033fc786b2237844b04e6,citation,https://doi.org/10.1016/j.neucom.2015.03.078,Cumulative attribute relation regularization learning for human age estimation,2015 +89,MORPH Commercial,morph,40.8419836,-73.94368971,Columbia University,edu,a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,citation,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf,Fusing with context: A Bayesian approach to combining descriptive attributes,2011 +90,MORPH Commercial,morph,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,citation,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf,Fusing with context: A Bayesian approach to combining 
descriptive attributes,2011 +91,MORPH Commercial,morph,1.3484104,103.68297965,Nanyang Technological University,edu,d119443de1d75cad384d897c2ed5a7b9c1661d98,citation,https://doi.org/10.1109/ICIP.2010.5650873,Cost-sensitive subspace learning for human age estimation,2010 +92,MORPH Commercial,morph,34.2249827,-77.86907744,University of North Carolina at Wilmington,edu,97c59db934ff85c60c460a4591106682b5ab9caa,citation,https://doi.org/10.1109/BTAS.2012.6374568,Extremely dense face registration: Comparing automatic landmarking algorithms for general and ethno-gender models,2012 +93,MORPH Commercial,morph,43.2213516,-75.4085577,"Air Force Research Lab, Rome, NY",mil,834736698f2cc5c221c22369abe95515243a9fc3,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249,GARP-face: Balancing privacy protection and utility preservation in face de-identification,2014 +94,MORPH Commercial,morph,39.95472495,-75.15346905,Temple University,edu,834736698f2cc5c221c22369abe95515243a9fc3,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249,GARP-face: Balancing privacy protection and utility preservation in face de-identification,2014 +95,MORPH Commercial,morph,32.0575279,118.78682252,Southeast University,edu,3cb488a3b71f221a8616716a1fc2b951dd0de549,citation,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.764,Facial Age Estimation by Adaptive Label Distribution Learning,2014 +96,MORPH Commercial,morph,22.3386304,114.2620337,Hong Kong University of Science and Technology,edu,8000c4f278e9af4d087c0d0895fff7012c5e3d78,citation,https://www.cse.ust.hk/~yuzhangcse/papers/Zhang_Yeung_CVPR10.pdf,Multi-task warped Gaussian process for personalized age estimation,2010 +97,MORPH Commercial,morph,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,citation,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552862,Robust feature encoding for age-invariant face recognition,2016 +98,MORPH Commercial,morph,23.0502042,113.39880323,South China University of Technology,edu,4bd3de97b256b96556d19a5db71dda519934fd53,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,2016 +99,MORPH Commercial,morph,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4bd3de97b256b96556d19a5db71dda519934fd53,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,2016 +100,MORPH Commercial,morph,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,1d3dd9aba79a53390317ec1e0b7cd742cba43132,citation,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,A maximum entropy feature descriptor for age invariant face recognition,2015 +101,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,1d3dd9aba79a53390317ec1e0b7cd742cba43132,citation,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,A maximum entropy feature descriptor for age invariant face recognition,2015 +102,MORPH Commercial,morph,22.42031295,114.20788644,Chinese University of Hong Kong,edu,1d3dd9aba79a53390317ec1e0b7cd742cba43132,citation,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,A maximum entropy feature descriptor for age invariant face recognition,2015 +103,MORPH Commercial,morph,32.0575279,118.78682252,Southeast 
University,edu,1c530de1a94ac70bf9086e39af1712ea8d2d2781,citation,http://pdfs.semanticscholar.org/1c53/0de1a94ac70bf9086e39af1712ea8d2d2781.pdf,Sparsity Conditional Energy Label Distribution Learning for Age Estimation,2016 +104,MORPH Commercial,morph,37.4102193,-122.05965487,Carnegie Mellon University,edu,eb8519cec0d7a781923f68fdca0891713cb81163,citation,https://arxiv.org/pdf/1703.08617.pdf,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,2017 +105,MORPH Commercial,morph,45.57022705,-122.63709346,Concordia University,edu,eb8519cec0d7a781923f68fdca0891713cb81163,citation,https://arxiv.org/pdf/1703.08617.pdf,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,2017 +106,MORPH Commercial,morph,57.6252103,39.8845656,Yaroslavl State University,edu,cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6737950,Application for video analysis based on machine learning and computer vision algorithms,2013 +107,MORPH Commercial,morph,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +108,MORPH Commercial,morph,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +109,MORPH Commercial,morph,37.4102193,-122.05965487,Carnegie Mellon University,edu,17670b60dcfb5cbf8fdae0b266e18cf995f6014c,citation,http://arxiv.org/abs/1606.02254,Longitudinal Face Modeling via Temporal Deep Restricted Boltzmann Machines,2016 +110,MORPH Commercial,morph,45.57022705,-122.63709346,Concordia University,edu,17670b60dcfb5cbf8fdae0b266e18cf995f6014c,citation,http://arxiv.org/abs/1606.02254,Longitudinal Face Modeling via Temporal Deep Restricted Boltzmann Machines,2016 +111,MORPH Commercial,morph,46.0658836,11.1159894,University of Trento,edu,2fd96238a7e372146cdf6c2338edc932031dd1f0,citation,https://arxiv.org/pdf/1802.00237.pdf,Face Aging with Contextual Generative Adversarial Nets,2017 +112,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,2fd96238a7e372146cdf6c2338edc932031dd1f0,citation,https://arxiv.org/pdf/1802.00237.pdf,Face Aging with Contextual Generative Adversarial Nets,2017 +113,MORPH Commercial,morph,51.44415765,7.26096541,Ruhr-University Bochum,edu,b249f10a30907a80f2a73582f696bc35ba4db9e2,citation,http://pdfs.semanticscholar.org/f06d/6161eef9325285b32356e1c4b5527479eb9b.pdf,Improved graph-based SFA: Information preservation complements the slowness principle,2016 +114,MORPH Commercial,morph,39.9808333,116.34101249,Beihang University,edu,8b266e68cc71f98ee42b04dc8f3e336c47f199cb,citation,https://arxiv.org/pdf/1711.10352.pdf,Learning Face Age Progression: A Pyramid Architecture of GANs,2017 +115,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,8b266e68cc71f98ee42b04dc8f3e336c47f199cb,citation,https://arxiv.org/pdf/1711.10352.pdf,Learning Face Age Progression: A Pyramid Architecture of GANs,2017 +116,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing 
University,edu,0e2d956790d3b8ab18cee8df6c949504ee78ad42,citation,https://doi.org/10.1109/IVCNZ.2013.6727024,Scalable face image retrieval integrating multi-feature quantization and constrained reference re-ranking,2013 +117,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,citation,http://doi.acm.org/10.1145/3090311,Multifeature Anisotropic Orthogonal Gaussian Process for Automatic Age Estimation,2017 +118,MORPH Commercial,morph,-33.88890695,151.18943366,University of Sydney,edu,2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,citation,http://doi.acm.org/10.1145/3090311,Multifeature Anisotropic Orthogonal Gaussian Process for Automatic Age Estimation,2017 +119,MORPH Commercial,morph,51.2975344,1.07296165,University of Kent,edu,2336de3a81dada63eb00ea82f7570c4069342fb5,citation,http://doi.acm.org/10.1145/2361407.2361428,A methodological framework for investigating age factors on the performance of biometric systems,2012 +120,MORPH Commercial,morph,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +121,MORPH Commercial,morph,39.95472495,-75.15346905,Temple University,edu,019e471667c72b5b3728b4a9ba9fe301a7426fb2,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_012.pdf,Cross-age face verification by coordinating with cross-face age verification,2015 +122,MORPH Commercial,morph,45.57022705,-122.63709346,Concordia University,edu,c418a3441f992fea523926f837f4bfb742548c16,citation,http://pdfs.semanticscholar.org/c418/a3441f992fea523926f837f4bfb742548c16.pdf,A Computer Approach for Face Aging Problems,2010 +123,MORPH Commercial,morph,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d80a3d1f3a438e02a6685e66ee908446766fefa9,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +124,MORPH Commercial,morph,34.67567405,33.04577648,Cyprus University of Technology,edu,ebbceab4e15bf641f74e335b70c6c4490a043961,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813349,Evaluating the performance of face-aging algorithms,2008 +125,MORPH Commercial,morph,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,d84a48f7d242d73b32a9286f9b148f5575acf227,citation,http://pdfs.semanticscholar.org/d84a/48f7d242d73b32a9286f9b148f5575acf227.pdf,Global and Local Consistent Age Generative Adversarial Networks,2018 +126,MORPH Commercial,morph,12.9551259,77.5741985,Bangalore Institute of Technology,edu,8f5facdc0a2a79283864aad03edc702e2a400346,citation,http://pdfs.semanticscholar.org/8f5f/acdc0a2a79283864aad03edc702e2a400346.pdf,Estimation Framework using Bio - Inspired Features for Facial Image,0 +127,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,08f6ad0a3e75b715852f825d12b6f28883f5ca05,citation,http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf,Face recognition: Some challenges in forensics,2011 +128,MORPH Commercial,morph,41.10427915,29.02231159,Istanbul Technical University,edu,2050847bc7a1a0453891f03aeeb4643e360fde7d,citation,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf,Accio: A Data Set for Face Track Retrieval in Movies Across Age,2015 +129,MORPH Commercial,morph,49.10184375,8.4331256,Karlsruhe Institute of 
Technology,edu,2050847bc7a1a0453891f03aeeb4643e360fde7d,citation,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf,Accio: A Data Set for Face Track Retrieval in Movies Across Age,2015 +130,MORPH Commercial,morph,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,3cc46bf79fb9225cf308815c7d41c8dd5625cc29,citation,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf,Age interval and gender prediction using PARAFAC2 applied to speech utterances,2016 +131,MORPH Commercial,morph,34.67567405,33.04577648,Cyprus University of Technology,edu,3cc46bf79fb9225cf308815c7d41c8dd5625cc29,citation,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf,Age interval and gender prediction using PARAFAC2 applied to speech utterances,2016 +132,MORPH Commercial,morph,23.09461185,113.28788994,Sun Yat-Sen University,edu,189e5a2fa51ed471c0e7227d82dffb52736070d8,citation,https://doi.org/10.1109/ICIP.2017.8296995,Cross-age face recognition using reference coding with kernel direct discriminant analysis,2017 +133,MORPH Commercial,morph,42.357757,-83.06286711,Wayne State University,edu,4f1249369127cc2e2894f6b2f1052d399794919a,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663,Deep Age Estimation: From Classification to Ranking,2018 +134,MORPH Commercial,morph,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +135,MORPH Commercial,morph,52.3553655,4.9501644,University of Amsterdam,edu,14014a1bdeb5d63563b68b52593e3ac1e3ce7312,citation,http://pdfs.semanticscholar.org/1401/4a1bdeb5d63563b68b52593e3ac1e3ce7312.pdf,Expression-Invariant Age Estimation,2014 +136,MORPH Commercial,morph,31.83907195,117.26420748,University of Science and Technology of China,edu,659dc6aa517645a118b79f0f0273e46ab7b53cd9,citation,https://doi.org/10.1109/ACPR.2015.7486608,Age-invariant face recognition using a feature progressing model,2015 +137,MORPH Commercial,morph,30.0818727,31.24454841,Benha University,edu,a9fc23d612e848250d5b675e064dba98f05ad0d9,citation,http://pdfs.semanticscholar.org/a9fc/23d612e848250d5b675e064dba98f05ad0d9.pdf,Face Age Estimation Approach based on Deep Learning and Principle Component Analysis,2018 +138,MORPH Commercial,morph,31.51368535,34.44019341,"Islamic University of Gaza, Palestine",edu,d5fa9d98c8da54a57abf353767a927d662b7f026,citation,http://pdfs.semanticscholar.org/f15e/9712b8731e1f5fd9566aca513edda910b5b8.pdf,Age Estimation based on Neural Networks using Face Features,2010 +139,MORPH Commercial,morph,32.0575279,118.78682252,Southeast University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +140,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +141,MORPH Commercial,morph,34.0224149,-118.28634407,University of Southern California,edu,eb6ee56e085ebf473da990d032a4249437a3e462,citation,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf,Age/gender classification with whole-component convolutional neural networks (WC-CNN),2017 +142,MORPH 
Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,e506cdb250eba5e70c5147eb477fbd069714765b,citation,https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf,Heterogeneous Face Recognition,2012 +143,MORPH Commercial,morph,35.90503535,-79.04775327,University of North Carolina,edu,f374ac9307be5f25145b44931f5a53b388a77e49,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339060,Improvements in Active Appearance Model based synthetic age progression for adult aging,2009 +144,MORPH Commercial,morph,38.83133325,-77.30798839,George Mason University,edu,62750d78e819d745b9200b0c5c35fcae6fb9f404,citation,http://doi.org/10.1007/s11042-016-4085-8,Leveraging implicit demographic information for face recognition using a multi-expert system,2016 +145,MORPH Commercial,morph,41.9037626,12.5144384,Sapienza University of Rome,edu,62750d78e819d745b9200b0c5c35fcae6fb9f404,citation,http://doi.org/10.1007/s11042-016-4085-8,Leveraging implicit demographic information for face recognition using a multi-expert system,2016 +146,MORPH Commercial,morph,40.845492,14.2578058,University of Naples Federico II,edu,62750d78e819d745b9200b0c5c35fcae6fb9f404,citation,http://doi.org/10.1007/s11042-016-4085-8,Leveraging implicit demographic information for face recognition using a multi-expert system,2016 +147,MORPH Commercial,morph,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,e4c3587392d477b7594086c6f28a00a826abf004,citation,https://doi.org/10.1109/ICIP.2017.8296998,Face recognition by facial attribute assisted network,2017 +148,MORPH Commercial,morph,39.9922379,116.30393816,Peking University,edu,c4ca092972abb74ee1c20b7cae6e69c654479e2c,citation,https://doi.org/10.1109/ICIP.2016.7532960,Linear canonical correlation analysis based ranking approach for facial age estimation,2016 +149,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,575141e42740564f64d9be8ab88d495192f5b3bc,citation,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf,Age Estimation Based on Multi-Region Convolutional Neural Network,2016 +150,MORPH Commercial,morph,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,575141e42740564f64d9be8ab88d495192f5b3bc,citation,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf,Age Estimation Based on Multi-Region Convolutional Neural Network,2016 +151,MORPH Commercial,morph,56.66340325,12.87929727,Halmstad University,edu,555f75077a02f33a05841f9b63a1388ec5fbcba5,citation,https://arxiv.org/pdf/1810.03360.pdf,A Survey on Periocular Biometrics Research,2016 +152,MORPH Commercial,morph,39.94976005,116.33629046,Beijing Jiaotong University,edu,0821028073981f9bd2dba2ad2557b25403fe7d7d,citation,http://doi.acm.org/10.1145/2733373.2806318,Facial Age Estimation Based on Structured Low-rank Representation,2015 +153,MORPH Commercial,morph,46.109237,7.08453549,IDIAP Research Institute,edu,939123cf21dc9189a03671484c734091b240183e,citation,http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf,Within- and cross- database evaluations for face gender classification via befit protocols,2014 +154,MORPH Commercial,morph,36.689487,2.981877,"Center for Development of Advanced Technologies, Algeria",edu,4551194408383b12db19a22cca5db0f185cced5c,citation,https://doi.org/10.1109/TNNLS.2014.2341634,Nonlinear Topological Component Analysis: Application to Age-Invariant Face Recognition,2015 +155,MORPH 
Commercial,morph,56.45796755,-2.98214831,University of Dundee,edu,8b10383ef569ea0029a2c4a60cc2d8c87391b4db,citation,http://pdfs.semanticscholar.org/fe2d/20dca6dcedc7944cc2d9fea76de6cbb9d90c.pdf,Age classification using Radon transform and entropy based scaling SVM,2011 +156,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,d37ca68742b2999667faf464f78d2fbf81e0cb07,citation,https://doi.org/10.1007/978-3-319-25417-3_76,DFDnet: Discriminant Face Descriptor Network for Facial Age Estimation,2015 +157,MORPH Commercial,morph,-35.2776999,149.118527,Australian National University,edu,a7191958e806fce2505a057196ccb01ea763b6ea,citation,http://pdfs.semanticscholar.org/a719/1958e806fce2505a057196ccb01ea763b6ea.pdf,Convolutional Neural Network based Age Estimation from Facial Image and Depth Prediction from Single Image,2016 +158,MORPH Commercial,morph,35.907757,127.766922,"Electronics and Telecommunications Research Institute, Korea",edu,abbc6dcbd032ff80e0535850f1bc27c4610b0d45,citation,https://doi.org/10.1109/ICIP.2015.7350983,Facial age estimation via extended curvature Gabor filter,2015 +159,MORPH Commercial,morph,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,abbc6dcbd032ff80e0535850f1bc27c4610b0d45,citation,https://doi.org/10.1109/ICIP.2015.7350983,Facial age estimation via extended curvature Gabor filter,2015 +160,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,989332c5f1b22604d6bb1f78e606cb6b1f694e1a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,Recurrent Face Aging,2016 +161,MORPH Commercial,morph,32.0575279,118.78682252,Southeast University,edu,989332c5f1b22604d6bb1f78e606cb6b1f694e1a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,Recurrent Face Aging,2016 +162,MORPH Commercial,morph,46.0658836,11.1159894,University of Trento,edu,989332c5f1b22604d6bb1f78e606cb6b1f694e1a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,Recurrent Face Aging,2016 +163,MORPH Commercial,morph,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c,citation,http://pdfs.semanticscholar.org/1fd3/dbb6e910708fa85c8a86e17ba0b6fef5617c.pdf,Age interval and gender prediction using PARAFAC2 on speech recordings and face images,2016 +164,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,6c6f0e806e4e286f3b18b934f42c72b67030ce17,citation,https://doi.org/10.1109/FG.2011.5771345,Combination of age and head pose for adult face verification,2011 +165,MORPH Commercial,morph,46.5190557,6.5667576,"Swiss Federal, Institute of Technology, Lausanne",edu,6c6f0e806e4e286f3b18b934f42c72b67030ce17,citation,https://doi.org/10.1109/FG.2011.5771345,Combination of age and head pose for adult face verification,2011 +166,MORPH Commercial,morph,52.6221571,1.2409136,University of East Anglia,edu,05a0d04693b2a51a8131d195c68ad9f5818b2ce1,citation,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf,Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?,2017 +167,MORPH Commercial,morph,40.44415295,-79.96243993,University of Pittsburgh,edu,05a0d04693b2a51a8131d195c68ad9f5818b2ce1,citation,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf,Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?,2017 
+168,MORPH Commercial,morph,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,387b54cf6c186c12d83f95df6bd458c5eb1254ee,citation,https://doi.org/10.1109/VCIP.2017.8305123,Deep probabilities for age estimation,2017 +169,MORPH Commercial,morph,35.97320905,-78.89755054,North Carolina Central University,edu,1ca1b4f787712ede215030d22a0eea41534a601e,citation,https://doi.org/10.1109/CVPRW.2010.5543609,Human age estimation: What is the influence across race and gender?,2010 +170,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,1ca1b4f787712ede215030d22a0eea41534a601e,citation,https://doi.org/10.1109/CVPRW.2010.5543609,Human age estimation: What is the influence across race and gender?,2010 +171,MORPH Commercial,morph,1.3484104,103.68297965,Nanyang Technological University,edu,b6a23f72007cb40223d7e1e1cc47e466716de945,citation,https://doi.org/10.1109/CVPRW.2010.5544598,Ordinary preserving manifold analysis for human age estimation,2010 +172,MORPH Commercial,morph,60.7897318,10.6821927,"Norwegian Biometrics Lab, NTNU, Norway",edu,0647c9d56cf11215894d57d677997826b22f6a13,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557,Transgender face recognition with off-the-shelf pre-trained CNNs: A comprehensive study,2018 +173,MORPH Commercial,morph,52.3553655,4.9501644,University of Amsterdam,edu,935a7793cbb8f102924fa34fce1049727de865c2,citation,https://doi.org/10.1109/ICIP.2015.7351554,Age estimation under changes in image quality: An experimental study,2015 +174,MORPH Commercial,morph,40.01407945,-105.26695944,"University of Colorado, Boulder",edu,4aabd6db4594212019c9af89b3e66f39f3108aac,citation,http://pdfs.semanticscholar.org/4aab/d6db4594212019c9af89b3e66f39f3108aac.pdf,The Mere Exposure Effect and Classical Conditioning,2015 +175,MORPH Commercial,morph,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,73d15a975b0595e0cc2e0981a9396a89c474dc7e,citation,https://arxiv.org/pdf/1811.03680.pdf,Gender Effect on Face Recognition for a Large Longitudinal Database,2018 +176,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,51bb86dc8748088a198b216f7e97616634147388,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890496,Face age estimation by using Bisection Search Tree,2013 +177,MORPH Commercial,morph,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,8cffe360a05085d4bcba111a3a3cd113d96c0369,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248,Learning universal multi-view age estimator using video context,2011 +178,MORPH Commercial,morph,1.3170417,103.8321041,"Facebook, Singapore",company,8cffe360a05085d4bcba111a3a3cd113d96c0369,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248,Learning universal multi-view age estimator using video context,2011 +179,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,8cffe360a05085d4bcba111a3a3cd113d96c0369,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248,Learning universal multi-view age estimator using video context,2011 +180,MORPH Commercial,morph,23.143197,113.34009651,South China Normal University,edu,dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,citation,http://doi.org/10.1007/s11042-017-4646-5,Age classification with deep learning face representation,2017 +181,MORPH Commercial,morph,23.0502042,113.39880323,South China University of Technology,edu,dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,citation,http://doi.org/10.1007/s11042-017-4646-5,Age 
classification with deep learning face representation,2017 +182,MORPH Commercial,morph,50.0764296,14.41802312,Czech Technical University,edu,023ed32ac3ea6029f09b8c582efbe3866de7d00a,citation,http://pdfs.semanticscholar.org/023e/d32ac3ea6029f09b8c582efbe3866de7d00a.pdf,Discriminative learning from partially annotated examples,2016 +183,MORPH Commercial,morph,35.5167538,139.48342251,Tokyo Institute of Technology,edu,435dc062d565ce87c6c20a5f49430eb9a4b573c4,citation,http://pdfs.semanticscholar.org/435d/c062d565ce87c6c20a5f49430eb9a4b573c4.pdf,Lighting Condition Adaptation for Perceived Age Estimation,2011 +184,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,6a5d7d20a8c4993d56bcf702c772aa3f95f99450,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813408,Face recognition with temporal invariance: A 3D aging model,2008 +185,MORPH Commercial,morph,35.97320905,-78.89755054,North Carolina Central University,edu,2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,citation,https://doi.org/10.1109/CVPRW.2010.5543608,A study of large-scale ethnicity estimation with gender and age variations,2010 +186,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,citation,https://doi.org/10.1109/CVPRW.2010.5543608,A study of large-scale ethnicity estimation with gender and age variations,2010 +187,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,citation,https://doi.org/10.1109/TIP.2015.2481327,Cost-Sensitive Local Binary Feature Learning for Facial Age Estimation,2015 +188,MORPH Commercial,morph,1.3484104,103.68297965,Nanyang Technological University,edu,a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,citation,https://doi.org/10.1109/TIP.2015.2481327,Cost-Sensitive Local Binary Feature Learning for Facial Age Estimation,2015 +189,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,141cb9ee401f223220d3468592effa90f0c255fa,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7815403,Longitudinal Study of Automatic Face Recognition,2015 +190,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,e22adcd2a6a7544f017ec875ce8f89d5c59e09c8,citation,https://arxiv.org/pdf/1807.11936.pdf,Gender Privacy: An Ensemble of Semi Adversarial Networks for Confounding Arbitrary Gender Classifiers,2018 +191,MORPH Commercial,morph,25.01682835,121.53846924,National Taiwan University,edu,6ab33fa51467595f18a7a22f1d356323876f8262,citation,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,Ordinal hyperplanes ranker with cost sensitivities for age estimation,2011 +192,MORPH Commercial,morph,25.0410728,121.6147562,Institute of Information Science,edu,6ab33fa51467595f18a7a22f1d356323876f8262,citation,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,Ordinal hyperplanes ranker with cost sensitivities for age estimation,2011 +193,MORPH Commercial,morph,25.0411727,121.6146518,"Academia Sinica, Taiwan",edu,6ab33fa51467595f18a7a22f1d356323876f8262,citation,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,Ordinal hyperplanes ranker with cost sensitivities for age estimation,2011 +194,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,63488398f397b55552f484409b86d812dacde99a,citation,http://pdfs.semanticscholar.org/6348/8398f397b55552f484409b86d812dacde99a.pdf,Learning Universal Multi-view Age Estimator by Video Contexts,2011 +195,MORPH 
Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,6adecb82edbf84a0097ff623428f4f1936e31de0,citation,https://doi.org/10.1007/s11760-011-0246-4,Client-specific A-stack model for adult face verification across aging,2011 +196,MORPH Commercial,morph,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,fcb97ede372c5bddde7a61924ac2fd29788c82ce,citation,https://doi.org/10.1109/TSMCC.2012.2192727,Ordinary Preserving Manifold Analysis for Human Age and Head Pose Estimation,2013 +197,MORPH Commercial,morph,1.3484104,103.68297965,Nanyang Technological University,edu,fcb97ede372c5bddde7a61924ac2fd29788c82ce,citation,https://doi.org/10.1109/TSMCC.2012.2192727,Ordinary Preserving Manifold Analysis for Human Age and Head Pose Estimation,2013 +198,MORPH Commercial,morph,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +199,MORPH Commercial,morph,37.2520226,127.0555019,"Samsung SAIT, Korea",company,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +200,MORPH Commercial,morph,35.14479945,33.90492318,Eastern Mediterranean University,edu,c5421a18583f629b49ca20577022f201692c4f5d,citation,http://pdfs.semanticscholar.org/c542/1a18583f629b49ca20577022f201692c4f5d.pdf,Facial Age Classification using Subpattern-based Approaches,2011 +201,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_AgeNet_Deeply_Learned_ICCV_2015_paper.pdf,AgeNet: Deeply Learned Regressor and Classifier for Robust Apparent Age Estimation,2015 +202,MORPH Commercial,morph,31.32235655,121.38400941,Shanghai University,edu,5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b,citation,https://pdfs.semanticscholar.org/5f0d/4a0b5f72d8700cdf8cb179263a8fa866b59b.pdf,Memo No . 
85 06 / 2018 Deep Regression Forests for Age Estimation,2018 +203,MORPH Commercial,morph,24.96841805,121.19139696,National Central University,edu,c58ece1a3fa23608f022e424ec5a93cddda31308,citation,https://doi.org/10.1109/JSYST.2014.2325957,Extraction of Visual Facial Features for Health Management,2016 +204,MORPH Commercial,morph,50.0764296,14.41802312,Czech Technical University,edu,56e25358ebfaf8a8b3c7c33ed007e24f026065d0,citation,https://doi.org/10.1007/s10994-015-5541-9,V-shaped interval insensitive loss for ordinal classification,2015 +205,MORPH Commercial,morph,5.7648848,102.6281702,"University Sultan Zainal Abidin, Malaysia",edu,3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398675,Age prediction on face features via multiple classifiers,2018 +206,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,2836d68c86f29bb87537ea6066d508fde838ad71,citation,http://arxiv.org/pdf/1510.06503v1.pdf,Personalized Age Progression with Aging Dictionary,2015 +207,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing University,edu,2836d68c86f29bb87537ea6066d508fde838ad71,citation,http://arxiv.org/pdf/1510.06503v1.pdf,Personalized Age Progression with Aging Dictionary,2015 +208,MORPH Commercial,morph,22.42031295,114.20788644,Chinese University of Hong Kong,edu,55966926e7c28b1eee1c7eb7a0b11b10605a1af0,citation,http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf,Surpassing Human-Level Face Verification Performance on LFW with GaussianFace,2015 +209,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,citation,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,0 +210,MORPH Commercial,morph,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,citation,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,0 +211,MORPH Commercial,morph,34.67567405,33.04577648,Cyprus University of Technology,edu,fa518a033b1f6299d1826389bd1520cf52291b56,citation,https://pdfs.semanticscholar.org/fa51/8a033b1f6299d1826389bd1520cf52291b56.pdf,Facial Age Simulation using Age-specific 3D Models and Recursive PCA,2013 +212,MORPH Commercial,morph,38.83133325,-77.30798839,George Mason University,edu,1c147261f5ab1b8ee0a54021a3168fa191096df8,citation,http://pdfs.semanticscholar.org/1c14/7261f5ab1b8ee0a54021a3168fa191096df8.pdf,Face Recognition across Time Lapse Using Convolutional Neural Networks,2016 +213,MORPH Commercial,morph,32.05765485,118.7550004,HoHai University,edu,b84b7b035c574727e4c30889e973423fe15560d7,citation,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf,Human Age Estimation Using Ranking SVM,2012 +214,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,b84b7b035c574727e4c30889e973423fe15560d7,citation,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf,Human Age Estimation Using Ranking SVM,2012 +215,MORPH Commercial,morph,39.6810328,-75.7540184,University of Delaware,edu,19da9f3532c2e525bf92668198b8afec14f9efea,citation,http://pdfs.semanticscholar.org/19da/9f3532c2e525bf92668198b8afec14f9efea.pdf,Challenge: Face verification across age progression using real-world 
data,2011 +216,MORPH Commercial,morph,39.95472495,-75.15346905,Temple University,edu,f24e379e942e134d41c4acec444ecf02b9d0d3a9,citation,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf,Analysis of Facial Images across Age Progression by Humans,2011 +217,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,f24e379e942e134d41c4acec444ecf02b9d0d3a9,citation,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf,Analysis of Facial Images across Age Progression by Humans,2011 +218,MORPH Commercial,morph,40.00229045,116.32098908,Tsinghua University,edu,51f626540860ad75b68206025a45466a6d087aa6,citation,https://doi.org/10.1109/ICIP.2017.8296595,Cluster convolutional neural networks for facial age estimation,2017 +219,MORPH Commercial,morph,37.4102193,-122.05965487,Carnegie Mellon University,edu,452ea180cf4d08d7500fc4bc046fd7141fd3d112,citation,https://doi.org/10.1109/BTAS.2012.6374569,A robust approach to facial ethnicity classification on large scale face databases,2012 +220,MORPH Commercial,morph,47.3764534,8.54770931,ETH Zürich,edu,2facf3e85240042a02f289a0d40fee376c478d0f,citation,https://doi.org/10.1109/BTAS.2010.5634544,Aging face verification in score-age space using single reference image template,2010 +221,MORPH Commercial,morph,38.88140235,121.52281098,Dalian University of Technology,edu,ed70d1a9435c0b32c0c75c1a062f4f07556f7016,citation,https://doi.org/10.1109/ICIP.2015.7350774,Correlated warped Gaussian processes for gender-specific age estimation,2015 +222,MORPH Commercial,morph,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,ed70d1a9435c0b32c0c75c1a062f4f07556f7016,citation,https://doi.org/10.1109/ICIP.2015.7350774,Correlated warped Gaussian processes for gender-specific age estimation,2015 +223,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,citation,https://doi.org/10.1109/TMM.2015.2500730,Deep Aging Face Verification With Large Gaps,2016 +224,MORPH Commercial,morph,51.52344665,-0.25973535,"North Acton, London",edu,0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,citation,https://doi.org/10.1109/TMM.2015.2500730,Deep Aging Face Verification With Large Gaps,2016 +225,MORPH Commercial,morph,31.846918,117.29053367,Hefei University of Technology,edu,0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,citation,https://doi.org/10.1109/TMM.2015.2500730,Deep Aging Face Verification With Large Gaps,2016 +226,MORPH Commercial,morph,29.58333105,-98.61944505,University of Texas at San Antonio,edu,f2896dd2701fbb3564492a12c64f11a5ad456a67,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414,Cross-database age estimation based on transfer learning,2010 +227,MORPH Commercial,morph,34.1235825,108.83546,Xidian University,edu,f2896dd2701fbb3564492a12c64f11a5ad456a67,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414,Cross-database age estimation based on transfer learning,2010 +228,MORPH Commercial,morph,56.66340325,12.87929727,Halmstad University,edu,9cda3e56cec21bd8f91f7acfcefc04ac10973966,citation,https://doi.org/10.1109/IWBF.2016.7449688,"Periocular biometrics: databases, algorithms and directions",2016 +229,MORPH Commercial,morph,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,13aef395f426ca8bd93640c9c3f848398b189874,citation,https://pdfs.semanticscholar.org/13ae/f395f426ca8bd93640c9c3f848398b189874.pdf,1 Image Preprocessing and Complete 2 DPCA with Feature Extraction for 
Gender Recognition NSF REU 2017 : Statistical Learning and Data Mining,2017 +230,MORPH Commercial,morph,24.7925484,120.9951183,National Tsing Hua University,edu,cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,citation,https://doi.org/10.1109/TIFS.2013.2286265,Subspace Learning for Facial Age Estimation Via Pairwise Age Ranking,2013 +231,MORPH Commercial,morph,58.38131405,26.72078081,University of Tartu,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +232,MORPH Commercial,morph,41.3868913,2.16352385,University of Barcelona,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +233,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,86a8b3d0f753cb49ac3250fa14d277983e30a4b7,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.75,Exploiting Unlabeled Ages for Aging Pattern Analysis on a Large Database,2013 +234,MORPH Commercial,morph,34.2239869,-77.8701325,"UNCW, USA",edu,2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,citation,http://doi.acm.org/10.1145/1924559.1924608,A comparative study of active appearance model annotation schemes for the face,2010 +235,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,fc798314994bf94d1cde8d615ba4d5e61b6268b6,citation,http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf,"Face Recognition : face in video , age invariance , and facial marks",2009 +236,MORPH Commercial,morph,24.12084345,120.67571165,National Chung Hsing University,edu,635d2696aa597a278dd6563f079be06aa76a33c0,citation,https://doi.org/10.1109/ICIP.2016.7532429,Age estimation via fusion of multiple binary age grouping systems,2016 +237,MORPH Commercial,morph,25.01682835,121.53846924,National Taiwan University,edu,635d2696aa597a278dd6563f079be06aa76a33c0,citation,https://doi.org/10.1109/ICIP.2016.7532429,Age estimation via fusion of multiple binary age grouping systems,2016 +238,MORPH Commercial,morph,25.0411727,121.6146518,"Academia Sinica, Taiwan",edu,635d2696aa597a278dd6563f079be06aa76a33c0,citation,https://doi.org/10.1109/ICIP.2016.7532429,Age estimation via fusion of multiple binary age grouping systems,2016 +239,MORPH Commercial,morph,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,36486944b4feeb88c0499fecd253c5a53034a23f,citation,https://doi.org/10.1109/CISP-BMEI.2017.8301986,Deep feature selection and projection for cross-age face retrieval,2017 +240,MORPH Commercial,morph,1.2988926,103.7873107,"Institute for Infocomm Research, Singapore",edu,85f7f03b79d03da5fae3a7f79d9aac228a635166,citation,https://doi.org/10.1109/WACV.2009.5403085,Age categorization via ECOC with fused gabor and LBP features,2009 +241,MORPH Commercial,morph,39.6810328,-75.7540184,University of Delaware,edu,aee3427d0814d8a398fd31f4f46941e9e5488d83,citation,http://dl.acm.org/citation.cfm?id=1924573,Face verification with aging using AdaBoost and local binary patterns,2010 +242,MORPH Commercial,morph,23.09461185,113.28788994,Sun Yat-Sen University,edu,d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,citation,https://doi.org/10.1109/LSP.2017.2661983,Modified Hidden Factor Analysis for Cross-Age Face Recognition,2017 +243,MORPH 
Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,55bc7abcef8266d76667896bbc652d081d00f797,citation,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf,Impact of facial cosmetics on automatic gender and age estimation algorithms,2014 +244,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State University,edu,55bc7abcef8266d76667896bbc652d081d00f797,citation,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf,Impact of facial cosmetics on automatic gender and age estimation algorithms,2014 +245,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,7a65fc9e78eff3ab6062707deaadde024d2fad40,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf,A Study on Apparent Age Estimation,2015 +246,MORPH Commercial,morph,42.357757,-83.06286711,Wayne State University,edu,28d99dc2d673d62118658f8375b414e5192eac6f,citation,http://www.cs.wayne.edu/~mdong/cvpr17.pdf,Using Ranking-CNN for Age Estimation,2017 +247,MORPH Commercial,morph,37.4102193,-122.05965487,Carnegie Mellon University,edu,ec05078be14a11157ac0e1c6b430ac886124589b,citation,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,2018 +248,MORPH Commercial,morph,45.57022705,-122.63709346,Concordia University,edu,ec05078be14a11157ac0e1c6b430ac886124589b,citation,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,2018 +249,MORPH Commercial,morph,46.5190557,6.5667576,"Swiss Federal Institute of Technology Lausanne, Switzerland",edu,d7a84db2a1bf7b97657b0250f354f249394dd700,citation,https://doi.org/10.1109/ICIP.2010.5653518,Global and local feature based multi-classifier A-stack model for aging face identification,2010 +250,MORPH Commercial,morph,39.65404635,-79.96475355,West Virginia University,edu,d3c004125c71942846a9b32ae565c5216c068d1e,citation,http://pdfs.semanticscholar.org/d3c0/04125c71942846a9b32ae565c5216c068d1e.pdf,Recognizing Age-Separated Face Images: Humans and Machines,2014 +251,MORPH Commercial,morph,52.3553655,4.9501644,University of Amsterdam,edu,999289b0ef76c4c6daa16a4f42df056bf3d68377,citation,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf,The Role of Color and Contrast in Facial Age Estimation,2014 +252,MORPH Commercial,morph,51.99882735,4.37396037,Delft University of Technology,edu,999289b0ef76c4c6daa16a4f42df056bf3d68377,citation,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf,The Role of Color and Contrast in Facial Age Estimation,2014 +253,MORPH Commercial,morph,28.5456282,77.2731505,"IIIT Delhi, India",edu,f726738954e7055bb3615fa7e8f59f136d3e0bdc,citation,https://arxiv.org/pdf/1803.07385.pdf,Are you eligible? 
Predicting adulthood from face images via class specific mean autoencoder,2018 +254,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,citation,http://doi.acm.org/10.1145/2733373.2807962,What Shall I Look Like after N Years?,2015 +255,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing University,edu,b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,citation,http://doi.acm.org/10.1145/2733373.2807962,What Shall I Look Like after N Years?,2015 +256,MORPH Commercial,morph,45.42580475,-75.68740118,University of Ottawa,edu,16820ccfb626dcdc893cc7735784aed9f63cbb70,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf,Real-time embedded age and gender classification in unconstrained video,2015 +257,MORPH Commercial,morph,35.0274996,135.78154513,University of Caen,edu,0ad8149318912b5449085187eb3521786a37bc78,citation,http://arxiv.org/abs/1604.02975,CP-mtML: Coupled Projection Multi-Task Metric Learning for Large Scale Face Retrieval,2016 +258,MORPH Commercial,morph,51.44415765,7.26096541,Ruhr-University Bochum,edu,7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83,citation,http://pdfs.semanticscholar.org/7e1e/a2679a110241ed0dd38ff45cd4dfeb7a8e83.pdf,Extensions of Hierarchical Slow Feature Analysis for Efficient Classification and Regression on High-Dimensional Data,2017 +259,MORPH Commercial,morph,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,2e27667421a7eeab278e0b761db4d2c725683c3f,citation,https://doi.org/10.1007/s11042-013-1815-z,Effective human age estimation using a two-stage approach based on Lie Algebrized Gaussians feature,2013 +260,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing University,edu,0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.51,Facial Age Estimation by Learning from Label Distributions,2010 +261,MORPH Commercial,morph,32.0575279,118.78682252,Southeast University,edu,0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.51,Facial Age Estimation by Learning from Label Distributions,2010 +262,MORPH Commercial,morph,-37.78397455,144.95867433,Monash University,edu,0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.51,Facial Age Estimation by Learning from Label Distributions,2010 +263,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +264,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +265,MORPH Commercial,morph,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab,citation,https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf,Quaero at TRECVID 2010: Semantic Indexing,2010 +266,MORPH Commercial,morph,42.718568,-84.47791571,Michigan State 
University,edu,02d650d8a3a9daaba523433fbe93705df0a7f4b1,citation,http://pdfs.semanticscholar.org/02d6/50d8a3a9daaba523433fbe93705df0a7f4b1.pdf,How Does Aging Affect Facial Components?,2012 +267,MORPH Commercial,morph,34.67567405,33.04577648,Cyprus University of Technology,edu,70db3a0d2ca8a797153cc68506b8650908cb0ada,citation,http://pdfs.semanticscholar.org/70db/3a0d2ca8a797153cc68506b8650908cb0ada.pdf,An Overview of Research Activities in Facial Age Estimation Using the FG-NET Aging Database,2014 +268,MORPH Commercial,morph,22.5447154,113.9357164,Tencent,company,a2d1818eb461564a5153c74028e53856cf0b40fd,citation,https://arxiv.org/pdf/1810.07599.pdf,Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition,2018 +269,MORPH Commercial,morph,57.6252103,39.8845656,Yaroslavl State University,edu,05318a267226f6d855d83e9338eaa9e718b2a8dd,citation,https://fruct.org/publications/fruct16/files/Khr.pdf,Age estimation from face images: challenging problem for audience measurement systems,2014 +270,MORPH Commercial,morph,41.5381124,2.4447406,"EUP Mataró, Spain",edu,1f5725a4a2eb6cdaefccbc20dccadf893936df12,citation,https://doi.org/10.1109/CCST.2012.6393544,On the relevance of age in handwritten biometric recognition,2012 +271,MORPH Commercial,morph,34.67567405,33.04577648,Cyprus University of Technology,edu,876583a059154def7a4bc503b21542f80859affd,citation,https://doi.org/10.1109/IWBF.2016.7449697,On the analysis of factors influencing the performance of facial age progression,2016 +272,MORPH Commercial,morph,-35.0636071,147.3552234,Charles Sturt University,edu,2e231f1e7e641dd3619bec59e14d02e91360ac01,citation,https://arxiv.org/pdf/1807.10421.pdf,Fusion Network for Face-Based Age Estimation,2018 +273,MORPH Commercial,morph,51.3791442,-2.3252332,University of Bath,edu,2e231f1e7e641dd3619bec59e14d02e91360ac01,citation,https://arxiv.org/pdf/1807.10421.pdf,Fusion Network for Face-Based Age Estimation,2018 +274,MORPH Commercial,morph,40.0044795,116.370238,Chinese Academy of Sciences,edu,56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298618,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,2015 +275,MORPH Commercial,morph,39.9041999,116.4073963,Chinese Academy of Science,edu,56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298618,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,2015 +276,MORPH Commercial,morph,1.2962018,103.77689944,National University of Singapore,edu,56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298618,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,2015 +277,MORPH Commercial,morph,32.0565957,118.77408833,Nanjing University,edu,a6e43b73f9f87588783988333997a81b4487e2d5,citation,http://pdfs.semanticscholar.org/a6e4/3b73f9f87588783988333997a81b4487e2d5.pdf,Facial Age Estimation by Total Ordering Preserving Projection,2016 +278,MORPH Commercial,morph,1.2988926,103.7873107,"Institution for Infocomm Research, Singapore",edu,8229f2735a0db0ad41f4d7252129311f06959907,citation,https://doi.org/10.1109/TIP.2011.2106794,Active Learning for Solving the Incomplete Data Problem in Facial Age Classification by the Furthest Nearest-Neighbor Criterion,2011 +279,MORPH Commercial,morph,1.3484104,103.68297965,Nanyang Technological 
University,edu,8229f2735a0db0ad41f4d7252129311f06959907,citation,https://doi.org/10.1109/TIP.2011.2106794,Active Learning for Solving the Incomplete Data Problem in Facial Age Classification by the Furthest Nearest-Neighbor Criterion,2011
+280,MORPH Commercial,morph,39.2899685,-76.62196103,University of Maryland,edu,963a004e208ce4bd26fa79a570af61d31651b3c3,citation,https://doi.org/10.1016/j.jvlc.2009.01.011,Computational methods for modeling facial aging: A survey,2009
+281,MORPH Commercial,morph,40.48256135,-3.6906079,Universidad Autonoma de Madrid,edu,4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306,Understanding the discrimination power of facial regions in forensic casework,2013
+282,MORPH Commercial,morph,40.4445565,-3.7122785,"Dirección General de la Guardia Civil, Madrid, Spain",edu,4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306,Understanding the discrimination power of facial regions in forensic casework,2013
+283,MORPH Commercial,morph,-37.8087465,144.9638875,RMIT University,edu,c49075ead6eb07ede5ada4fe372899bd0cfb83ac,citation,https://doi.org/10.1109/ICSPCS.2015.7391782,Multi-stage classification network for automatic age estimation from facial images,2015
+284,MORPH Commercial,morph,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,00301c250d667700276b1e573640ff2fd7be574d,citation,https://doi.org/10.1109/BTAS.2014.6996242,Establishing a test set and initial comparisons for quantitatively evaluating synthetic age progression for adult aging,2014
diff --git a/site/datasets/final/morph_nc.csv b/site/datasets/final/morph_nc.csv new file mode 100644 index 00000000..6ff0320b --- /dev/null +++ b/site/datasets/final/morph_nc.csv @@ -0,0 +1,286 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,MORPH Non-Commercial,morph_nc,0.0,0.0,,,9055b155cbabdce3b98e16e5ac9c0edf00f9552f,main,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78,MORPH: a longitudinal image database of normal adult age-progression,2006
+1,MORPH Non-Commercial,morph_nc,34.80809035,135.45785218,Osaka University,edu,dad6b36fd515bda801f3d22a462cc62348f6aad8,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117531,Gait-based age estimation using a whole-generation gait database,2011
+2,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,citation,https://doi.org/10.1109/ICDSP.2016.7868598,Deep learning-based learning to rank with ties for image re-ranking,2016
+3,MORPH Non-Commercial,morph_nc,39.1118774,117.3497451,Civil Aviation University of China,edu,ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,citation,https://doi.org/10.1109/ICDSP.2016.7868598,Deep learning-based learning to rank with ties for image re-ranking,2016
+4,MORPH Non-Commercial,morph_nc,25.0410728,121.6147562,Institute of Information Science,edu,4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,citation,https://doi.org/10.1109/TIP.2014.2387379,A Learning Framework for Age Rank Estimation Based on Face Images With Scattering Transform,2015
+5,MORPH Non-Commercial,morph_nc,51.0267513,-1.3972576,"IBM Hursley Labs, UK",company,7123e510dea783035b02f6c35e35a1a09677c5ab,citation,https://doi.org/10.1109/ICPR.2016.7900297,Back to the future: A fully automatic method for robust age progression,2016
+6,MORPH Non-Commercial,morph_nc,35.9042272,-78.85565763,"IBM Research, North 
Carolina",company,7123e510dea783035b02f6c35e35a1a09677c5ab,citation,https://doi.org/10.1109/ICPR.2016.7900297,Back to the future: A fully automatic method for robust age progression,2016 +7,MORPH Non-Commercial,morph_nc,51.49887085,-0.17560797,Imperial College London,edu,7123e510dea783035b02f6c35e35a1a09677c5ab,citation,https://doi.org/10.1109/ICPR.2016.7900297,Back to the future: A fully automatic method for robust age progression,2016 +8,MORPH Non-Commercial,morph_nc,35.5167538,139.48342251,Tokyo Institute of Technology,edu,3083d2c6d4f456e01cbb72930dc2207af98a6244,citation,http://pdfs.semanticscholar.org/3083/d2c6d4f456e01cbb72930dc2207af98a6244.pdf,Perceived Age Estimation from Face Images,2011 +9,MORPH Non-Commercial,morph_nc,41.3868913,2.16352385,University of Barcelona,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +10,MORPH Non-Commercial,morph_nc,45.4312742,12.3265377,University of Venezia,edu,500fbe18afd44312738cab91b4689c12b4e0eeee,citation,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,2015 +11,MORPH Non-Commercial,morph_nc,41.10427915,29.02231159,Istanbul Technical University,edu,fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,citation,https://arxiv.org/pdf/1606.02909.pdf,Apparent Age Estimation Using Ensemble of Deep Learning Models,2016 +12,MORPH Non-Commercial,morph_nc,40.6341322,-8.6599726,"University of Beira Interior, Portugal",edu,81c21f4aafab39b7f5965829ec9e0f828d6a6182,citation,https://doi.org/10.1109/BTAS.2015.7358744,Acquiring high-resolution face images in outdoor environments: A master-slave calibration algorithm,2015 +13,MORPH Non-Commercial,morph_nc,42.36782045,-71.12666653,Harvard University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +14,MORPH Non-Commercial,morph_nc,40.9153196,-73.1270626,Stony Brook University,edu,0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,citation,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,2016 +15,MORPH Non-Commercial,morph_nc,40.47913175,-74.43168868,Rutgers University,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +16,MORPH Non-Commercial,morph_nc,39.2899685,-76.62196103,University of Maryland,edu,31f1e711fcf82c855f27396f181bf5e565a2f58d,citation,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54,Unconstrained Age Estimation with Deep Convolutional Neural Networks,2015 +17,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,af12a79892bd030c19dfea392f7a7ccb0e7ebb72,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247972,A study on human age estimation under facial expression changes,2012 +18,MORPH Non-Commercial,morph_nc,23.09461185,113.28788994,Sun Yat-Sen University,edu,2d7c2c015053fff5300515a7addcd74b523f3f66,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8323422,Age-Related Factor Guided Joint Task Modeling Convolutional Neural Network for Cross-Age Face Recognition,2018 
+19,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,b234d429c9ea682e54fca52f4b889b3170f65ffc,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22,A Concatenational Graph Evolution Aging Model,2012 +20,MORPH Non-Commercial,morph_nc,39.9922379,116.30393816,Peking University,edu,b234d429c9ea682e54fca52f4b889b3170f65ffc,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22,A Concatenational Graph Evolution Aging Model,2012 +21,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,b234d429c9ea682e54fca52f4b889b3170f65ffc,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22,A Concatenational Graph Evolution Aging Model,2012 +22,MORPH Non-Commercial,morph_nc,30.19331415,120.11930822,Zhejiang University,edu,ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,citation,https://doi.org/10.1109/LSP.2016.2602538,Structure-Aware Slow Feature Analysis for Age Estimation,2016 +23,MORPH Non-Commercial,morph_nc,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,citation,https://doi.org/10.1109/LSP.2016.2602538,Structure-Aware Slow Feature Analysis for Age Estimation,2016 +24,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,288964068cd87d97a98b8bc927d6e0d2349458a2,citation,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,Mean-Variance Loss for Deep Age Estimation from a Face,0 +25,MORPH Non-Commercial,morph_nc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,288964068cd87d97a98b8bc927d6e0d2349458a2,citation,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,Mean-Variance Loss for Deep Age Estimation from a Face,0 +26,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,cd63759842a56bd2ede3999f6e11a74ccbec318b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995404,Simultaneous dimensionality reduction and human age estimation via kernel partial least squares regression,2011 +27,MORPH Non-Commercial,morph_nc,28.5456282,77.2731505,"IIIT Delhi, India",edu,ffc81ced9ee8223ab0adb18817321cbee99606e6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157,A multibiometrics-based CAPTCHA for improved online security,2016 +28,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,ffc81ced9ee8223ab0adb18817321cbee99606e6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157,A multibiometrics-based CAPTCHA for improved online security,2016 +29,MORPH Non-Commercial,morph_nc,41.25713055,-72.9896696,Yale University,edu,df7312cbabb7d75d915ba0d91dea77100ded5c56,citation,https://arxiv.org/pdf/1811.06446.pdf,Preliminary Studies on a Large Face Database,2018 +30,MORPH Non-Commercial,morph_nc,29.6328784,-82.3490133,University of Florida,edu,df7312cbabb7d75d915ba0d91dea77100ded5c56,citation,https://arxiv.org/pdf/1811.06446.pdf,Preliminary Studies on a Large Face Database,2018 +31,MORPH Non-Commercial,morph_nc,31.83907195,117.26420748,University of Science and Technology of China,edu,56c700693b63e3da3b985777da6d9256e2e0dc21,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_079.pdf,Global refinement of random forest,2015 +32,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,1e344b99583b782e3eaf152cdfa15f217b781181,citation,http://doi.acm.org/10.1145/2499788.2499789,A new biologically inspired active 
appearance model for face age estimation by using local ordinal ranking,2013 +33,MORPH Non-Commercial,morph_nc,39.94976005,116.33629046,Beijing Jiaotong University,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +34,MORPH Non-Commercial,morph_nc,43.1576969,-77.58829158,University of Rochester,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +35,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,4b9ec224949c79a980a5a66664d0ac6233c3d575,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,2017 +36,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,bd8b7599acf53e3053aa27cfd522764e28474e57,citation,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf,Learning long term face aging patterns from partially dense aging databases,2009 +37,MORPH Non-Commercial,morph_nc,39.9922379,116.30393816,Peking University,edu,bd8b7599acf53e3053aa27cfd522764e28474e57,citation,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf,Learning long term face aging patterns from partially dense aging databases,2009 +38,MORPH Non-Commercial,morph_nc,43.614386,7.071125,EURECOM,edu,70569810e46f476515fce80a602a210f8d9a2b95,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.105,Apparent Age Estimation from Face Images Combining General and Children-Specialized Deep Learning Models,2016 +39,MORPH Non-Commercial,morph_nc,39.9213097,32.7988233,"TOBB Economy and Technology University, Ankara, Turkey",edu,cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,citation,https://doi.org/10.1109/SIU.2016.7495874,Analysis of the effect of image resolution on automatic face gender and age classification,2016 +40,MORPH Non-Commercial,morph_nc,-33.91758275,151.23124025,University of New South Wales,edu,29631ca6cff21c9199c70bcdbbcd5f812d331a96,citation,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf,Error Rates in Users of Automatic Face Recognition Software,2015 +41,MORPH Non-Commercial,morph_nc,-33.88890695,151.18943366,University of Sydney,edu,29631ca6cff21c9199c70bcdbbcd5f812d331a96,citation,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf,Error Rates in Users of Automatic Face Recognition Software,2015 +42,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,1a53ca294bbe5923c46a339955e8207907e9c8c6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870,What Else Does Your Biometric Data Reveal? A Survey on Soft Biometrics,2016 +43,MORPH Non-Commercial,morph_nc,43.614386,7.071125,EURECOM,edu,1a53ca294bbe5923c46a339955e8207907e9c8c6,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870,What Else Does Your Biometric Data Reveal? 
A Survey on Soft Biometrics,2016 +44,MORPH Non-Commercial,morph_nc,40.4319722,-86.92389368,Purdue University,edu,c7c53d75f6e963b403057d8ba5952e4974a779ad,citation,https://pdfs.semanticscholar.org/c7c5/3d75f6e963b403057d8ba5952e4974a779ad.pdf,Aging effects in automated face recognition,2018 +45,MORPH Non-Commercial,morph_nc,41.02451875,28.97697953,Bahçeşehir University,edu,0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,citation,https://doi.org/10.1109/CIBIM.2014.7015437,Biometric template update under facial aging,2014 +46,MORPH Non-Commercial,morph_nc,53.22853665,-0.54873472,University of Lincoln,edu,0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,citation,https://doi.org/10.1109/CIBIM.2014.7015437,Biometric template update under facial aging,2014 +47,MORPH Non-Commercial,morph_nc,46.0810723,13.2119474,University of Udine,edu,0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,citation,https://doi.org/10.1109/CIBIM.2014.7015437,Biometric template update under facial aging,2014 +48,MORPH Non-Commercial,morph_nc,25.0410728,121.6147562,Institute of Information Science,edu,1c17450c4d616e1e1eece248c42eba4f87de9e0d,citation,http://pdfs.semanticscholar.org/d269/39a00a8d3964de612cd3faa86764343d5622.pdf,Automatic Age Estimation from Face Images via Deep Ranking,2015 +49,MORPH Non-Commercial,morph_nc,43.47061295,-80.54724732,University of Waterloo,edu,f2902f5956d7e2dca536d9131d4334f85f52f783,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460191,Facial age estimation using Clustered Multi-task Support Vector Regression Machine,2012 +50,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,ba2bbef34f05551291410103e3de9e82fdf9dddd,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Guo_A_Study_on_2014_CVPR_paper.pdf,A Study on Cross-Population Age Estimation,2014 +51,MORPH Non-Commercial,morph_nc,31.32235655,121.38400941,Shanghai University,edu,d454ad60b061c1a1450810a0f335fafbfeceeccc,citation,https://arxiv.org/pdf/1712.07195.pdf,Deep Regression Forests for Age Estimation,2017 +52,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,ad2cb5c255e555d9767d526721a4c7053fa2ac58,citation,https://arxiv.org/pdf/1711.03990.pdf,Longitudinal Study of Child Face Recognition,2018 +53,MORPH Non-Commercial,morph_nc,39.95472495,-75.15346905,Temple University,edu,0cf2eecf20cfbcb7f153713479e3206670ea0e9c,citation,https://arxiv.org/pdf/1806.08906.pdf,Privacy-Protective-GAN for Face De-identification,2018 +54,MORPH Non-Commercial,morph_nc,31.32235655,121.38400941,Shanghai University,edu,c0b02be66a5a1907e8cfb8117de50f80b90a65a8,citation,http://doi.acm.org/10.1145/2808492.2808523,Manifold learning in sparse selected feature subspaces,2015 +55,MORPH Non-Commercial,morph_nc,47.6423318,-122.1369302,Microsoft,company,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +56,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +57,MORPH Non-Commercial,morph_nc,31.846918,117.29053367,Hefei University of Technology,edu,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +58,MORPH Non-Commercial,morph_nc,1.3484104,103.68297965,Nanyang Technological 
University,edu,ff012c56b9b1de969328dacd13e26b7138ff298b,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,Facial Age Estimation With Age Difference,2017 +59,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,2149d49c84a83848d6051867290d9c8bfcef0edb,citation,https://doi.org/10.1109/TIFS.2017.2746062,Label-Sensitive Deep Metric Learning for Facial Age Estimation,2018 +60,MORPH Non-Commercial,morph_nc,25.0410728,121.6147562,Institute of Information Science,edu,c44c84540db1c38ace232ef34b03bda1c81ba039,citation,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf,Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval,2014 +61,MORPH Non-Commercial,morph_nc,25.01682835,121.53846924,National Taiwan University,edu,c44c84540db1c38ace232ef34b03bda1c81ba039,citation,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf,Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval,2014 +62,MORPH Non-Commercial,morph_nc,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +63,MORPH Non-Commercial,morph_nc,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +64,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,2f2406551c693d616a840719ae1e6ea448e2f5d3,citation,http://biometrics.cse.msu.edu/Presentations/CharlesOtto_ICB13_AgeEstimationFaceImages_HumanVsMachinePerformance.pdf,Age estimation from face images: Human vs. 
machine performance,2013 +65,MORPH Non-Commercial,morph_nc,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,citation,https://doi.org/10.1109/TMM.2011.2167317,Web Image and Video Mining Towards Universal and Robust Age Estimator,2011 +66,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,citation,https://doi.org/10.1109/TMM.2011.2167317,Web Image and Video Mining Towards Universal and Robust Age Estimator,2011 +67,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,b446bcd7fb78adfe346cf7a01a38e4f43760f363,citation,http://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf,To appear in ICB 2018 Longitudinal Study of Child Face Recognition,2017 +68,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,c035c193eed5d72c7f187f0bc880a17d217dada0,citation,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf,"Local Gradient Gabor Pattern (LGGP) with Applications in Face Recognition, Cross-spectral Matching and Soft Biometrics",2013 +69,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,c035c193eed5d72c7f187f0bc880a17d217dada0,citation,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf,"Local Gradient Gabor Pattern (LGGP) with Applications in Face Recognition, Cross-spectral Matching and Soft Biometrics",2013 +70,MORPH Non-Commercial,morph_nc,34.66869155,-82.83743476,Clemson University,edu,c907104680ad53bdc673f2648d713e4d26335825,citation,http://doi.acm.org/10.1145/3077286.3077304,Dataset and Metrics for Adult Age-Progression Evaluation,2017 +71,MORPH Non-Commercial,morph_nc,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,c907104680ad53bdc673f2648d713e4d26335825,citation,http://doi.acm.org/10.1145/3077286.3077304,Dataset and Metrics for Adult Age-Progression Evaluation,2017 +72,MORPH Non-Commercial,morph_nc,37.5600406,126.9369248,Yonsei University,edu,fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f,citation,https://arxiv.org/pdf/1809.01990.pdf,Multi-Expert Gender Classification on Age Group by Integrating Deep Neural Networks,2018 +73,MORPH Non-Commercial,morph_nc,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,31a36014354ee7c89aa6d94e656db77922b180a5,citation,http://doi.acm.org/10.1145/2304496.2304509,An interactive tool for extremely dense landmarking of faces,2012 +74,MORPH Non-Commercial,morph_nc,37.5901411,127.0362318,Korea University,edu,4b519e2e88ccd45718b0fc65bfd82ebe103902f7,citation,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,A Discriminative Model for Age Invariant Face Recognition,2011 +75,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,4b519e2e88ccd45718b0fc65bfd82ebe103902f7,citation,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,A Discriminative Model for Age Invariant Face Recognition,2011 +76,MORPH Non-Commercial,morph_nc,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4b519e2e88ccd45718b0fc65bfd82ebe103902f7,citation,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,A Discriminative Model for Age Invariant Face Recognition,2011 +77,MORPH Non-Commercial,morph_nc,23.09461185,113.28788994,Sun Yat-Sen 
University,edu,23edcd0d2011d9c0d421193af061f2eb3e155da3,citation,http://doi.org/10.1007/s00371-015-1137-4,Facial age estimation by using stacked feature composition and selection,2015 +78,MORPH Non-Commercial,morph_nc,23.04436505,113.36668458,Guangzhou University,edu,23edcd0d2011d9c0d421193af061f2eb3e155da3,citation,http://doi.org/10.1007/s00371-015-1137-4,Facial age estimation by using stacked feature composition and selection,2015 +79,MORPH Non-Commercial,morph_nc,38.9530519,-77.3354508,"Cernium Corporation, Reston, VA, USA",company,604a281100784b4d5bc1a6db993d423abc5dc8f0,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,Face Verification Across Age Progression Using Discriminative Methods,2010 +80,MORPH Non-Commercial,morph_nc,39.2899685,-76.62196103,University of Maryland,edu,604a281100784b4d5bc1a6db993d423abc5dc8f0,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,Face Verification Across Age Progression Using Discriminative Methods,2010 +81,MORPH Non-Commercial,morph_nc,39.95472495,-75.15346905,Temple University,edu,604a281100784b4d5bc1a6db993d423abc5dc8f0,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,Face Verification Across Age Progression Using Discriminative Methods,2010 +82,MORPH Non-Commercial,morph_nc,51.2975344,1.07296165,University of Kent,edu,6486b36c6f7fd7675257d26e896223a02a1881d9,citation,https://doi.org/10.1109/THMS.2014.2376874,Selective Review and Analysis of Aging Effects in Biometric System Implementation,2015 +83,MORPH Non-Commercial,morph_nc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,16bce9f940bb01aa5ec961892cc021d4664eb9e4,citation,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf,Mutual Component Analysis for Heterogeneous Face Recognition,2016 +84,MORPH Non-Commercial,morph_nc,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,16bce9f940bb01aa5ec961892cc021d4664eb9e4,citation,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf,Mutual Component Analysis for Heterogeneous Face Recognition,2016 +85,MORPH Non-Commercial,morph_nc,34.67567405,33.04577648,Cyprus University of Technology,edu,9d3aa3b7d392fad596b067b13b9e42443bbc377c,citation,http://pdfs.semanticscholar.org/9d3a/a3b7d392fad596b067b13b9e42443bbc377c.pdf,Facial Biometric Templates and Aging: Problems and Challenges for Artificial Intelligence,2009 +86,MORPH Non-Commercial,morph_nc,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,217a21d60bb777d15cd9328970cab563d70b5d23,citation,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf,Hidden Factor Analysis for Age Invariant Face Recognition,2013 +87,MORPH Non-Commercial,morph_nc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,217a21d60bb777d15cd9328970cab563d70b5d23,citation,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf,Hidden Factor Analysis for Age Invariant Face Recognition,2013 +88,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing University,edu,b1bb517bd87a1212174033fc786b2237844b04e6,citation,https://doi.org/10.1016/j.neucom.2015.03.078,Cumulative attribute relation regularization learning for human age estimation,2015 +89,MORPH Non-Commercial,morph_nc,40.8419836,-73.94368971,Columbia University,edu,a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,citation,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf,Fusing with context: A Bayesian approach to combining descriptive attributes,2011 +90,MORPH Non-Commercial,morph_nc,34.2375581,-77.9270129,University 
of North Carolina Wilmington,edu,a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,citation,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf,Fusing with context: A Bayesian approach to combining descriptive attributes,2011 +91,MORPH Non-Commercial,morph_nc,1.3484104,103.68297965,Nanyang Technological University,edu,d119443de1d75cad384d897c2ed5a7b9c1661d98,citation,https://doi.org/10.1109/ICIP.2010.5650873,Cost-sensitive subspace learning for human age estimation,2010 +92,MORPH Non-Commercial,morph_nc,34.2249827,-77.86907744,University of North Carolina at Wilmington,edu,97c59db934ff85c60c460a4591106682b5ab9caa,citation,https://doi.org/10.1109/BTAS.2012.6374568,Extremely dense face registration: Comparing automatic landmarking algorithms for general and ethno-gender models,2012 +93,MORPH Non-Commercial,morph_nc,43.2213516,-75.4085577,"Air Force Research Lab, Rome, NY",mil,834736698f2cc5c221c22369abe95515243a9fc3,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249,GARP-face: Balancing privacy protection and utility preservation in face de-identification,2014 +94,MORPH Non-Commercial,morph_nc,39.95472495,-75.15346905,Temple University,edu,834736698f2cc5c221c22369abe95515243a9fc3,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249,GARP-face: Balancing privacy protection and utility preservation in face de-identification,2014 +95,MORPH Non-Commercial,morph_nc,32.0575279,118.78682252,Southeast University,edu,3cb488a3b71f221a8616716a1fc2b951dd0de549,citation,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.764,Facial Age Estimation by Adaptive Label Distribution Learning,2014 +96,MORPH Non-Commercial,morph_nc,22.3386304,114.2620337,Hong Kong University of Science and Technology,edu,8000c4f278e9af4d087c0d0895fff7012c5e3d78,citation,https://www.cse.ust.hk/~yuzhangcse/papers/Zhang_Yeung_CVPR10.pdf,Multi-task warped Gaussian process for personalized age estimation,2010 +97,MORPH Non-Commercial,morph_nc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,citation,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552862,Robust feature encoding for age-invariant face recognition,2016 +98,MORPH Non-Commercial,morph_nc,23.0502042,113.39880323,South China University of Technology,edu,4bd3de97b256b96556d19a5db71dda519934fd53,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,2016 +99,MORPH Non-Commercial,morph_nc,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4bd3de97b256b96556d19a5db71dda519934fd53,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.529,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,2016 +100,MORPH Non-Commercial,morph_nc,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,1d3dd9aba79a53390317ec1e0b7cd742cba43132,citation,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,A maximum entropy feature descriptor for age invariant face recognition,2015 +101,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,1d3dd9aba79a53390317ec1e0b7cd742cba43132,citation,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,A maximum entropy feature descriptor for age invariant face recognition,2015 +102,MORPH Non-Commercial,morph_nc,22.42031295,114.20788644,Chinese University of Hong 
Kong,edu,1d3dd9aba79a53390317ec1e0b7cd742cba43132,citation,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,A maximum entropy feature descriptor for age invariant face recognition,2015 +103,MORPH Non-Commercial,morph_nc,32.0575279,118.78682252,Southeast University,edu,1c530de1a94ac70bf9086e39af1712ea8d2d2781,citation,http://pdfs.semanticscholar.org/1c53/0de1a94ac70bf9086e39af1712ea8d2d2781.pdf,Sparsity Conditional Energy Label Distribution Learning for Age Estimation,2016 +104,MORPH Non-Commercial,morph_nc,37.4102193,-122.05965487,Carnegie Mellon University,edu,eb8519cec0d7a781923f68fdca0891713cb81163,citation,https://arxiv.org/pdf/1703.08617.pdf,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,2017 +105,MORPH Non-Commercial,morph_nc,45.57022705,-122.63709346,Concordia University,edu,eb8519cec0d7a781923f68fdca0891713cb81163,citation,https://arxiv.org/pdf/1703.08617.pdf,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,2017 +106,MORPH Non-Commercial,morph_nc,57.6252103,39.8845656,Yaroslavl State University,edu,cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6737950,Application for video analysis based on machine learning and computer vision algorithms,2013 +107,MORPH Non-Commercial,morph_nc,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +108,MORPH Non-Commercial,morph_nc,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +109,MORPH Non-Commercial,morph_nc,37.4102193,-122.05965487,Carnegie Mellon University,edu,17670b60dcfb5cbf8fdae0b266e18cf995f6014c,citation,http://arxiv.org/abs/1606.02254,Longitudinal Face Modeling via Temporal Deep Restricted Boltzmann Machines,2016 +110,MORPH Non-Commercial,morph_nc,45.57022705,-122.63709346,Concordia University,edu,17670b60dcfb5cbf8fdae0b266e18cf995f6014c,citation,http://arxiv.org/abs/1606.02254,Longitudinal Face Modeling via Temporal Deep Restricted Boltzmann Machines,2016 +111,MORPH Non-Commercial,morph_nc,46.0658836,11.1159894,University of Trento,edu,2fd96238a7e372146cdf6c2338edc932031dd1f0,citation,https://arxiv.org/pdf/1802.00237.pdf,Face Aging with Contextual Generative Adversarial Nets,2017 +112,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,2fd96238a7e372146cdf6c2338edc932031dd1f0,citation,https://arxiv.org/pdf/1802.00237.pdf,Face Aging with Contextual Generative Adversarial Nets,2017 +113,MORPH Non-Commercial,morph_nc,51.44415765,7.26096541,Ruhr-University Bochum,edu,b249f10a30907a80f2a73582f696bc35ba4db9e2,citation,http://pdfs.semanticscholar.org/f06d/6161eef9325285b32356e1c4b5527479eb9b.pdf,Improved graph-based SFA: Information preservation complements the slowness principle,2016 +114,MORPH Non-Commercial,morph_nc,39.9808333,116.34101249,Beihang University,edu,8b266e68cc71f98ee42b04dc8f3e336c47f199cb,citation,https://arxiv.org/pdf/1711.10352.pdf,Learning Face Age Progression: A Pyramid Architecture of GANs,2017 +115,MORPH 
Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,8b266e68cc71f98ee42b04dc8f3e336c47f199cb,citation,https://arxiv.org/pdf/1711.10352.pdf,Learning Face Age Progression: A Pyramid Architecture of GANs,2017 +116,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing University,edu,0e2d956790d3b8ab18cee8df6c949504ee78ad42,citation,https://doi.org/10.1109/IVCNZ.2013.6727024,Scalable face image retrieval integrating multi-feature quantization and constrained reference re-ranking,2013 +117,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,citation,http://doi.acm.org/10.1145/3090311,Multifeature Anisotropic Orthogonal Gaussian Process for Automatic Age Estimation,2017 +118,MORPH Non-Commercial,morph_nc,-33.88890695,151.18943366,University of Sydney,edu,2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,citation,http://doi.acm.org/10.1145/3090311,Multifeature Anisotropic Orthogonal Gaussian Process for Automatic Age Estimation,2017 +119,MORPH Non-Commercial,morph_nc,51.2975344,1.07296165,University of Kent,edu,2336de3a81dada63eb00ea82f7570c4069342fb5,citation,http://doi.acm.org/10.1145/2361407.2361428,A methodological framework for investigating age factors on the performance of biometric systems,2012 +120,MORPH Non-Commercial,morph_nc,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +121,MORPH Non-Commercial,morph_nc,39.95472495,-75.15346905,Temple University,edu,019e471667c72b5b3728b4a9ba9fe301a7426fb2,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_012.pdf,Cross-age face verification by coordinating with cross-face age verification,2015 +122,MORPH Non-Commercial,morph_nc,45.57022705,-122.63709346,Concordia University,edu,c418a3441f992fea523926f837f4bfb742548c16,citation,http://pdfs.semanticscholar.org/c418/a3441f992fea523926f837f4bfb742548c16.pdf,A Computer Approach for Face Aging Problems,2010 +123,MORPH Non-Commercial,morph_nc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d80a3d1f3a438e02a6685e66ee908446766fefa9,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +124,MORPH Non-Commercial,morph_nc,34.67567405,33.04577648,Cyprus University of Technology,edu,ebbceab4e15bf641f74e335b70c6c4490a043961,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813349,Evaluating the performance of face-aging algorithms,2008 +125,MORPH Non-Commercial,morph_nc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,d84a48f7d242d73b32a9286f9b148f5575acf227,citation,http://pdfs.semanticscholar.org/d84a/48f7d242d73b32a9286f9b148f5575acf227.pdf,Global and Local Consistent Age Generative Adversarial Networks,2018 +126,MORPH Non-Commercial,morph_nc,12.9551259,77.5741985,Bangalore Institute of Technology,edu,8f5facdc0a2a79283864aad03edc702e2a400346,citation,http://pdfs.semanticscholar.org/8f5f/acdc0a2a79283864aad03edc702e2a400346.pdf,Estimation Framework using Bio - Inspired Features for Facial Image,0 +127,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,08f6ad0a3e75b715852f825d12b6f28883f5ca05,citation,http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf,Face recognition: Some challenges in forensics,2011 +128,MORPH 
Non-Commercial,morph_nc,41.10427915,29.02231159,Istanbul Technical University,edu,2050847bc7a1a0453891f03aeeb4643e360fde7d,citation,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf,Accio: A Data Set for Face Track Retrieval in Movies Across Age,2015 +129,MORPH Non-Commercial,morph_nc,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,2050847bc7a1a0453891f03aeeb4643e360fde7d,citation,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf,Accio: A Data Set for Face Track Retrieval in Movies Across Age,2015 +130,MORPH Non-Commercial,morph_nc,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,3cc46bf79fb9225cf308815c7d41c8dd5625cc29,citation,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf,Age interval and gender prediction using PARAFAC2 applied to speech utterances,2016 +131,MORPH Non-Commercial,morph_nc,34.67567405,33.04577648,Cyprus University of Technology,edu,3cc46bf79fb9225cf308815c7d41c8dd5625cc29,citation,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf,Age interval and gender prediction using PARAFAC2 applied to speech utterances,2016 +132,MORPH Non-Commercial,morph_nc,23.09461185,113.28788994,Sun Yat-Sen University,edu,189e5a2fa51ed471c0e7227d82dffb52736070d8,citation,https://doi.org/10.1109/ICIP.2017.8296995,Cross-age face recognition using reference coding with kernel direct discriminant analysis,2017 +133,MORPH Non-Commercial,morph_nc,42.357757,-83.06286711,Wayne State University,edu,4f1249369127cc2e2894f6b2f1052d399794919a,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663,Deep Age Estimation: From Classification to Ranking,2018 +134,MORPH Non-Commercial,morph_nc,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +135,MORPH Non-Commercial,morph_nc,52.3553655,4.9501644,University of Amsterdam,edu,14014a1bdeb5d63563b68b52593e3ac1e3ce7312,citation,http://pdfs.semanticscholar.org/1401/4a1bdeb5d63563b68b52593e3ac1e3ce7312.pdf,Expression-Invariant Age Estimation,2014 +136,MORPH Non-Commercial,morph_nc,31.83907195,117.26420748,University of Science and Technology of China,edu,659dc6aa517645a118b79f0f0273e46ab7b53cd9,citation,https://doi.org/10.1109/ACPR.2015.7486608,Age-invariant face recognition using a feature progressing model,2015 +137,MORPH Non-Commercial,morph_nc,30.0818727,31.24454841,Benha University,edu,a9fc23d612e848250d5b675e064dba98f05ad0d9,citation,http://pdfs.semanticscholar.org/a9fc/23d612e848250d5b675e064dba98f05ad0d9.pdf,Face Age Estimation Approach based on Deep Learning and Principle Component Analysis,2018 +138,MORPH Non-Commercial,morph_nc,31.51368535,34.44019341,"Islamic University of Gaza, Palestine",edu,d5fa9d98c8da54a57abf353767a927d662b7f026,citation,http://pdfs.semanticscholar.org/f15e/9712b8731e1f5fd9566aca513edda910b5b8.pdf,Age Estimation based on Neural Networks using Face Features,2010 +139,MORPH Non-Commercial,morph_nc,32.0575279,118.78682252,Southeast University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +140,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing 
University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +141,MORPH Non-Commercial,morph_nc,34.0224149,-118.28634407,University of Southern California,edu,eb6ee56e085ebf473da990d032a4249437a3e462,citation,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf,Age/gender classification with whole-component convolutional neural networks (WC-CNN),2017 +142,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,e506cdb250eba5e70c5147eb477fbd069714765b,citation,https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf,Heterogeneous Face Recognition,2012 +143,MORPH Non-Commercial,morph_nc,35.90503535,-79.04775327,University of North Carolina,edu,f374ac9307be5f25145b44931f5a53b388a77e49,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339060,Improvements in Active Appearance Model based synthetic age progression for adult aging,2009 +144,MORPH Non-Commercial,morph_nc,38.83133325,-77.30798839,George Mason University,edu,62750d78e819d745b9200b0c5c35fcae6fb9f404,citation,http://doi.org/10.1007/s11042-016-4085-8,Leveraging implicit demographic information for face recognition using a multi-expert system,2016 +145,MORPH Non-Commercial,morph_nc,41.9037626,12.5144384,Sapienza University of Rome,edu,62750d78e819d745b9200b0c5c35fcae6fb9f404,citation,http://doi.org/10.1007/s11042-016-4085-8,Leveraging implicit demographic information for face recognition using a multi-expert system,2016 +146,MORPH Non-Commercial,morph_nc,40.845492,14.2578058,University of Naples Federico II,edu,62750d78e819d745b9200b0c5c35fcae6fb9f404,citation,http://doi.org/10.1007/s11042-016-4085-8,Leveraging implicit demographic information for face recognition using a multi-expert system,2016 +147,MORPH Non-Commercial,morph_nc,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,e4c3587392d477b7594086c6f28a00a826abf004,citation,https://doi.org/10.1109/ICIP.2017.8296998,Face recognition by facial attribute assisted network,2017 +148,MORPH Non-Commercial,morph_nc,39.9922379,116.30393816,Peking University,edu,c4ca092972abb74ee1c20b7cae6e69c654479e2c,citation,https://doi.org/10.1109/ICIP.2016.7532960,Linear canonical correlation analysis based ranking approach for facial age estimation,2016 +149,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,575141e42740564f64d9be8ab88d495192f5b3bc,citation,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf,Age Estimation Based on Multi-Region Convolutional Neural Network,2016 +150,MORPH Non-Commercial,morph_nc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,575141e42740564f64d9be8ab88d495192f5b3bc,citation,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf,Age Estimation Based on Multi-Region Convolutional Neural Network,2016 +151,MORPH Non-Commercial,morph_nc,56.66340325,12.87929727,Halmstad University,edu,555f75077a02f33a05841f9b63a1388ec5fbcba5,citation,https://arxiv.org/pdf/1810.03360.pdf,A Survey on Periocular Biometrics Research,2016 +152,MORPH Non-Commercial,morph_nc,39.94976005,116.33629046,Beijing Jiaotong University,edu,0821028073981f9bd2dba2ad2557b25403fe7d7d,citation,http://doi.acm.org/10.1145/2733373.2806318,Facial Age Estimation Based on Structured Low-rank Representation,2015 +153,MORPH 
Non-Commercial,morph_nc,46.109237,7.08453549,IDIAP Research Institute,edu,939123cf21dc9189a03671484c734091b240183e,citation,http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf,Within- and cross- database evaluations for face gender classification via befit protocols,2014 +154,MORPH Non-Commercial,morph_nc,36.689487,2.981877,"Center for Development of Advanced Technologies, Algeria",edu,4551194408383b12db19a22cca5db0f185cced5c,citation,https://doi.org/10.1109/TNNLS.2014.2341634,Nonlinear Topological Component Analysis: Application to Age-Invariant Face Recognition,2015 +155,MORPH Non-Commercial,morph_nc,56.45796755,-2.98214831,University of Dundee,edu,8b10383ef569ea0029a2c4a60cc2d8c87391b4db,citation,http://pdfs.semanticscholar.org/fe2d/20dca6dcedc7944cc2d9fea76de6cbb9d90c.pdf,Age classification using Radon transform and entropy based scaling SVM,2011 +156,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,d37ca68742b2999667faf464f78d2fbf81e0cb07,citation,https://doi.org/10.1007/978-3-319-25417-3_76,DFDnet: Discriminant Face Descriptor Network for Facial Age Estimation,2015 +157,MORPH Non-Commercial,morph_nc,-35.2776999,149.118527,Australian National University,edu,a7191958e806fce2505a057196ccb01ea763b6ea,citation,http://pdfs.semanticscholar.org/a719/1958e806fce2505a057196ccb01ea763b6ea.pdf,Convolutional Neural Network based Age Estimation from Facial Image and Depth Prediction from Single Image,2016 +158,MORPH Non-Commercial,morph_nc,35.907757,127.766922,"Electronics and Telecommunications Research Institute, Korea",edu,abbc6dcbd032ff80e0535850f1bc27c4610b0d45,citation,https://doi.org/10.1109/ICIP.2015.7350983,Facial age estimation via extended curvature Gabor filter,2015 +159,MORPH Non-Commercial,morph_nc,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,abbc6dcbd032ff80e0535850f1bc27c4610b0d45,citation,https://doi.org/10.1109/ICIP.2015.7350983,Facial age estimation via extended curvature Gabor filter,2015 +160,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,989332c5f1b22604d6bb1f78e606cb6b1f694e1a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,Recurrent Face Aging,2016 +161,MORPH Non-Commercial,morph_nc,32.0575279,118.78682252,Southeast University,edu,989332c5f1b22604d6bb1f78e606cb6b1f694e1a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,Recurrent Face Aging,2016 +162,MORPH Non-Commercial,morph_nc,46.0658836,11.1159894,University of Trento,edu,989332c5f1b22604d6bb1f78e606cb6b1f694e1a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,Recurrent Face Aging,2016 +163,MORPH Non-Commercial,morph_nc,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c,citation,http://pdfs.semanticscholar.org/1fd3/dbb6e910708fa85c8a86e17ba0b6fef5617c.pdf,Age interval and gender prediction using PARAFAC2 on speech recordings and face images,2016 +164,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,6c6f0e806e4e286f3b18b934f42c72b67030ce17,citation,https://doi.org/10.1109/FG.2011.5771345,Combination of age and head pose for adult face verification,2011 +165,MORPH Non-Commercial,morph_nc,46.5190557,6.5667576,"Swiss Federal, Institute of Technology, 
Lausanne",edu,6c6f0e806e4e286f3b18b934f42c72b67030ce17,citation,https://doi.org/10.1109/FG.2011.5771345,Combination of age and head pose for adult face verification,2011 +166,MORPH Non-Commercial,morph_nc,52.6221571,1.2409136,University of East Anglia,edu,05a0d04693b2a51a8131d195c68ad9f5818b2ce1,citation,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf,Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?,2017 +167,MORPH Non-Commercial,morph_nc,40.44415295,-79.96243993,University of Pittsburgh,edu,05a0d04693b2a51a8131d195c68ad9f5818b2ce1,citation,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf,Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?,2017 +168,MORPH Non-Commercial,morph_nc,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,387b54cf6c186c12d83f95df6bd458c5eb1254ee,citation,https://doi.org/10.1109/VCIP.2017.8305123,Deep probabilities for age estimation,2017 +169,MORPH Non-Commercial,morph_nc,35.97320905,-78.89755054,North Carolina Central University,edu,1ca1b4f787712ede215030d22a0eea41534a601e,citation,https://doi.org/10.1109/CVPRW.2010.5543609,Human age estimation: What is the influence across race and gender?,2010 +170,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,1ca1b4f787712ede215030d22a0eea41534a601e,citation,https://doi.org/10.1109/CVPRW.2010.5543609,Human age estimation: What is the influence across race and gender?,2010 +171,MORPH Non-Commercial,morph_nc,1.3484104,103.68297965,Nanyang Technological University,edu,b6a23f72007cb40223d7e1e1cc47e466716de945,citation,https://doi.org/10.1109/CVPRW.2010.5544598,Ordinary preserving manifold analysis for human age estimation,2010 +172,MORPH Non-Commercial,morph_nc,60.7897318,10.6821927,"Norwegian Biometrics Lab, NTNU, Norway",edu,0647c9d56cf11215894d57d677997826b22f6a13,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557,Transgender face recognition with off-the-shelf pre-trained CNNs: A comprehensive study,2018 +173,MORPH Non-Commercial,morph_nc,52.3553655,4.9501644,University of Amsterdam,edu,935a7793cbb8f102924fa34fce1049727de865c2,citation,https://doi.org/10.1109/ICIP.2015.7351554,Age estimation under changes in image quality: An experimental study,2015 +174,MORPH Non-Commercial,morph_nc,40.01407945,-105.26695944,"University of Colorado, Boulder",edu,4aabd6db4594212019c9af89b3e66f39f3108aac,citation,http://pdfs.semanticscholar.org/4aab/d6db4594212019c9af89b3e66f39f3108aac.pdf,The Mere Exposure Effect and Classical Conditioning,2015 +175,MORPH Non-Commercial,morph_nc,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,73d15a975b0595e0cc2e0981a9396a89c474dc7e,citation,https://arxiv.org/pdf/1811.03680.pdf,Gender Effect on Face Recognition for a Large Longitudinal Database,2018 +176,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,51bb86dc8748088a198b216f7e97616634147388,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890496,Face age estimation by using Bisection Search Tree,2013 +177,MORPH Non-Commercial,morph_nc,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,8cffe360a05085d4bcba111a3a3cd113d96c0369,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248,Learning universal multi-view age estimator using video context,2011 +178,MORPH Non-Commercial,morph_nc,1.3170417,103.8321041,"Facebook, 
Singapore",company,8cffe360a05085d4bcba111a3a3cd113d96c0369,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248,Learning universal multi-view age estimator using video context,2011 +179,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,8cffe360a05085d4bcba111a3a3cd113d96c0369,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248,Learning universal multi-view age estimator using video context,2011 +180,MORPH Non-Commercial,morph_nc,23.143197,113.34009651,South China Normal University,edu,dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,citation,http://doi.org/10.1007/s11042-017-4646-5,Age classification with deep learning face representation,2017 +181,MORPH Non-Commercial,morph_nc,23.0502042,113.39880323,South China University of Technology,edu,dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,citation,http://doi.org/10.1007/s11042-017-4646-5,Age classification with deep learning face representation,2017 +182,MORPH Non-Commercial,morph_nc,50.0764296,14.41802312,Czech Technical University,edu,023ed32ac3ea6029f09b8c582efbe3866de7d00a,citation,http://pdfs.semanticscholar.org/023e/d32ac3ea6029f09b8c582efbe3866de7d00a.pdf,Discriminative learning from partially annotated examples,2016 +183,MORPH Non-Commercial,morph_nc,35.5167538,139.48342251,Tokyo Institute of Technology,edu,435dc062d565ce87c6c20a5f49430eb9a4b573c4,citation,http://pdfs.semanticscholar.org/435d/c062d565ce87c6c20a5f49430eb9a4b573c4.pdf,Lighting Condition Adaptation for Perceived Age Estimation,2011 +184,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,6a5d7d20a8c4993d56bcf702c772aa3f95f99450,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813408,Face recognition with temporal invariance: A 3D aging model,2008 +185,MORPH Non-Commercial,morph_nc,35.97320905,-78.89755054,North Carolina Central University,edu,2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,citation,https://doi.org/10.1109/CVPRW.2010.5543608,A study of large-scale ethnicity estimation with gender and age variations,2010 +186,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,citation,https://doi.org/10.1109/CVPRW.2010.5543608,A study of large-scale ethnicity estimation with gender and age variations,2010 +187,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,citation,https://doi.org/10.1109/TIP.2015.2481327,Cost-Sensitive Local Binary Feature Learning for Facial Age Estimation,2015 +188,MORPH Non-Commercial,morph_nc,1.3484104,103.68297965,Nanyang Technological University,edu,a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,citation,https://doi.org/10.1109/TIP.2015.2481327,Cost-Sensitive Local Binary Feature Learning for Facial Age Estimation,2015 +189,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,141cb9ee401f223220d3468592effa90f0c255fa,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7815403,Longitudinal Study of Automatic Face Recognition,2015 +190,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,e22adcd2a6a7544f017ec875ce8f89d5c59e09c8,citation,https://arxiv.org/pdf/1807.11936.pdf,Gender Privacy: An Ensemble of Semi Adversarial Networks for Confounding Arbitrary Gender Classifiers,2018 +191,MORPH Non-Commercial,morph_nc,25.01682835,121.53846924,National Taiwan 
University,edu,6ab33fa51467595f18a7a22f1d356323876f8262,citation,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,Ordinal hyperplanes ranker with cost sensitivities for age estimation,2011 +192,MORPH Non-Commercial,morph_nc,25.0410728,121.6147562,Institute of Information Science,edu,6ab33fa51467595f18a7a22f1d356323876f8262,citation,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,Ordinal hyperplanes ranker with cost sensitivities for age estimation,2011 +193,MORPH Non-Commercial,morph_nc,25.0411727,121.6146518,"Academia Sinica, Taiwan",edu,6ab33fa51467595f18a7a22f1d356323876f8262,citation,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,Ordinal hyperplanes ranker with cost sensitivities for age estimation,2011 +194,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,63488398f397b55552f484409b86d812dacde99a,citation,http://pdfs.semanticscholar.org/6348/8398f397b55552f484409b86d812dacde99a.pdf,Learning Universal Multi-view Age Estimator by Video Contexts,2011 +195,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,6adecb82edbf84a0097ff623428f4f1936e31de0,citation,https://doi.org/10.1007/s11760-011-0246-4,Client-specific A-stack model for adult face verification across aging,2011 +196,MORPH Non-Commercial,morph_nc,1.3037257,103.7737763,"Advanced Digital Sciences Center, Singapore",edu,fcb97ede372c5bddde7a61924ac2fd29788c82ce,citation,https://doi.org/10.1109/TSMCC.2012.2192727,Ordinary Preserving Manifold Analysis for Human Age and Head Pose Estimation,2013 +197,MORPH Non-Commercial,morph_nc,1.3484104,103.68297965,Nanyang Technological University,edu,fcb97ede372c5bddde7a61924ac2fd29788c82ce,citation,https://doi.org/10.1109/TSMCC.2012.2192727,Ordinary Preserving Manifold Analysis for Human Age and Head Pose Estimation,2013 +198,MORPH Non-Commercial,morph_nc,36.3697191,127.362537,Korea Advanced Institute of Science and Technology,edu,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +199,MORPH Non-Commercial,morph_nc,37.2520226,127.0555019,"Samsung SAIT, Korea",company,cb27b45329d61f5f95ed213798d4b2a615e76be2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,2018 +200,MORPH Non-Commercial,morph_nc,35.14479945,33.90492318,Eastern Mediterranean University,edu,c5421a18583f629b49ca20577022f201692c4f5d,citation,http://pdfs.semanticscholar.org/c542/1a18583f629b49ca20577022f201692c4f5d.pdf,Facial Age Classification using Subpattern-based Approaches,2011 +201,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_AgeNet_Deeply_Learned_ICCV_2015_paper.pdf,AgeNet: Deeply Learned Regressor and Classifier for Robust Apparent Age Estimation,2015 +202,MORPH Non-Commercial,morph_nc,31.32235655,121.38400941,Shanghai University,edu,5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b,citation,https://pdfs.semanticscholar.org/5f0d/4a0b5f72d8700cdf8cb179263a8fa866b59b.pdf,Memo No . 
85 06 / 2018 Deep Regression Forests for Age Estimation,2018 +203,MORPH Non-Commercial,morph_nc,24.96841805,121.19139696,National Central University,edu,c58ece1a3fa23608f022e424ec5a93cddda31308,citation,https://doi.org/10.1109/JSYST.2014.2325957,Extraction of Visual Facial Features for Health Management,2016 +204,MORPH Non-Commercial,morph_nc,50.0764296,14.41802312,Czech Technical University,edu,56e25358ebfaf8a8b3c7c33ed007e24f026065d0,citation,https://doi.org/10.1007/s10994-015-5541-9,V-shaped interval insensitive loss for ordinal classification,2015 +205,MORPH Non-Commercial,morph_nc,5.7648848,102.6281702,"University Sultan Zainal Abidin, Malaysia",edu,3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398675,Age prediction on face features via multiple classifiers,2018 +206,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,2836d68c86f29bb87537ea6066d508fde838ad71,citation,http://arxiv.org/pdf/1510.06503v1.pdf,Personalized Age Progression with Aging Dictionary,2015 +207,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing University,edu,2836d68c86f29bb87537ea6066d508fde838ad71,citation,http://arxiv.org/pdf/1510.06503v1.pdf,Personalized Age Progression with Aging Dictionary,2015 +208,MORPH Non-Commercial,morph_nc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,55966926e7c28b1eee1c7eb7a0b11b10605a1af0,citation,http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf,Surpassing Human-Level Face Verification Performance on LFW with GaussianFace,2015 +209,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,citation,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,0 +210,MORPH Non-Commercial,morph_nc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,citation,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,0 +211,MORPH Non-Commercial,morph_nc,34.67567405,33.04577648,Cyprus University of Technology,edu,fa518a033b1f6299d1826389bd1520cf52291b56,citation,https://pdfs.semanticscholar.org/fa51/8a033b1f6299d1826389bd1520cf52291b56.pdf,Facial Age Simulation using Age-specific 3D Models and Recursive PCA,2013 +212,MORPH Non-Commercial,morph_nc,38.83133325,-77.30798839,George Mason University,edu,1c147261f5ab1b8ee0a54021a3168fa191096df8,citation,http://pdfs.semanticscholar.org/1c14/7261f5ab1b8ee0a54021a3168fa191096df8.pdf,Face Recognition across Time Lapse Using Convolutional Neural Networks,2016 +213,MORPH Non-Commercial,morph_nc,32.05765485,118.7550004,HoHai University,edu,b84b7b035c574727e4c30889e973423fe15560d7,citation,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf,Human Age Estimation Using Ranking SVM,2012 +214,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,b84b7b035c574727e4c30889e973423fe15560d7,citation,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf,Human Age Estimation Using Ranking SVM,2012 +215,MORPH Non-Commercial,morph_nc,39.6810328,-75.7540184,University of 
Delaware,edu,19da9f3532c2e525bf92668198b8afec14f9efea,citation,http://pdfs.semanticscholar.org/19da/9f3532c2e525bf92668198b8afec14f9efea.pdf,Challenge: Face verification across age progression using real-world data,2011 +216,MORPH Non-Commercial,morph_nc,39.95472495,-75.15346905,Temple University,edu,f24e379e942e134d41c4acec444ecf02b9d0d3a9,citation,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf,Analysis of Facial Images across Age Progression by Humans,2011 +217,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,f24e379e942e134d41c4acec444ecf02b9d0d3a9,citation,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf,Analysis of Facial Images across Age Progression by Humans,2011 +218,MORPH Non-Commercial,morph_nc,40.00229045,116.32098908,Tsinghua University,edu,51f626540860ad75b68206025a45466a6d087aa6,citation,https://doi.org/10.1109/ICIP.2017.8296595,Cluster convolutional neural networks for facial age estimation,2017 +219,MORPH Non-Commercial,morph_nc,37.4102193,-122.05965487,Carnegie Mellon University,edu,452ea180cf4d08d7500fc4bc046fd7141fd3d112,citation,https://doi.org/10.1109/BTAS.2012.6374569,A robust approach to facial ethnicity classification on large scale face databases,2012 +220,MORPH Non-Commercial,morph_nc,47.3764534,8.54770931,ETH Zürich,edu,2facf3e85240042a02f289a0d40fee376c478d0f,citation,https://doi.org/10.1109/BTAS.2010.5634544,Aging face verification in score-age space using single reference image template,2010 +221,MORPH Non-Commercial,morph_nc,38.88140235,121.52281098,Dalian University of Technology,edu,ed70d1a9435c0b32c0c75c1a062f4f07556f7016,citation,https://doi.org/10.1109/ICIP.2015.7350774,Correlated warped Gaussian processes for gender-specific age estimation,2015 +222,MORPH Non-Commercial,morph_nc,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,ed70d1a9435c0b32c0c75c1a062f4f07556f7016,citation,https://doi.org/10.1109/ICIP.2015.7350774,Correlated warped Gaussian processes for gender-specific age estimation,2015 +223,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,citation,https://doi.org/10.1109/TMM.2015.2500730,Deep Aging Face Verification With Large Gaps,2016 +224,MORPH Non-Commercial,morph_nc,51.52344665,-0.25973535,"North Acton, London",edu,0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,citation,https://doi.org/10.1109/TMM.2015.2500730,Deep Aging Face Verification With Large Gaps,2016 +225,MORPH Non-Commercial,morph_nc,31.846918,117.29053367,Hefei University of Technology,edu,0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,citation,https://doi.org/10.1109/TMM.2015.2500730,Deep Aging Face Verification With Large Gaps,2016 +226,MORPH Non-Commercial,morph_nc,29.58333105,-98.61944505,University of Texas at San Antonio,edu,f2896dd2701fbb3564492a12c64f11a5ad456a67,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414,Cross-database age estimation based on transfer learning,2010 +227,MORPH Non-Commercial,morph_nc,34.1235825,108.83546,Xidian University,edu,f2896dd2701fbb3564492a12c64f11a5ad456a67,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414,Cross-database age estimation based on transfer learning,2010 +228,MORPH Non-Commercial,morph_nc,56.66340325,12.87929727,Halmstad University,edu,9cda3e56cec21bd8f91f7acfcefc04ac10973966,citation,https://doi.org/10.1109/IWBF.2016.7449688,"Periocular biometrics: databases, algorithms and 
directions",2016 +229,MORPH Non-Commercial,morph_nc,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,13aef395f426ca8bd93640c9c3f848398b189874,citation,https://pdfs.semanticscholar.org/13ae/f395f426ca8bd93640c9c3f848398b189874.pdf,1 Image Preprocessing and Complete 2 DPCA with Feature Extraction for Gender Recognition NSF REU 2017 : Statistical Learning and Data Mining,2017 +230,MORPH Non-Commercial,morph_nc,24.7925484,120.9951183,National Tsing Hua University,edu,cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,citation,https://doi.org/10.1109/TIFS.2013.2286265,Subspace Learning for Facial Age Estimation Via Pairwise Age Ranking,2013 +231,MORPH Non-Commercial,morph_nc,58.38131405,26.72078081,University of Tartu,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +232,MORPH Non-Commercial,morph_nc,41.3868913,2.16352385,University of Barcelona,edu,1b248ed8e7c9514648cd598960fadf9ab17e7fe8,citation,https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",0 +233,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,86a8b3d0f753cb49ac3250fa14d277983e30a4b7,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.75,Exploiting Unlabeled Ages for Aging Pattern Analysis on a Large Database,2013 +234,MORPH Non-Commercial,morph_nc,34.2239869,-77.8701325,"UNCW, USA",edu,2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,citation,http://doi.acm.org/10.1145/1924559.1924608,A comparative study of active appearance model annotation schemes for the face,2010 +235,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,fc798314994bf94d1cde8d615ba4d5e61b6268b6,citation,http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf,"Face Recognition : face in video , age invariance , and facial marks",2009 +236,MORPH Non-Commercial,morph_nc,24.12084345,120.67571165,National Chung Hsing University,edu,635d2696aa597a278dd6563f079be06aa76a33c0,citation,https://doi.org/10.1109/ICIP.2016.7532429,Age estimation via fusion of multiple binary age grouping systems,2016 +237,MORPH Non-Commercial,morph_nc,25.01682835,121.53846924,National Taiwan University,edu,635d2696aa597a278dd6563f079be06aa76a33c0,citation,https://doi.org/10.1109/ICIP.2016.7532429,Age estimation via fusion of multiple binary age grouping systems,2016 +238,MORPH Non-Commercial,morph_nc,25.0411727,121.6146518,"Academia Sinica, Taiwan",edu,635d2696aa597a278dd6563f079be06aa76a33c0,citation,https://doi.org/10.1109/ICIP.2016.7532429,Age estimation via fusion of multiple binary age grouping systems,2016 +239,MORPH Non-Commercial,morph_nc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,36486944b4feeb88c0499fecd253c5a53034a23f,citation,https://doi.org/10.1109/CISP-BMEI.2017.8301986,Deep feature selection and projection for cross-age face retrieval,2017 +240,MORPH Non-Commercial,morph_nc,1.2988926,103.7873107,"Institute for Infocomm Research, Singapore",edu,85f7f03b79d03da5fae3a7f79d9aac228a635166,citation,https://doi.org/10.1109/WACV.2009.5403085,Age categorization via ECOC with fused gabor and LBP features,2009 +241,MORPH Non-Commercial,morph_nc,39.6810328,-75.7540184,University of 
Delaware,edu,aee3427d0814d8a398fd31f4f46941e9e5488d83,citation,http://dl.acm.org/citation.cfm?id=1924573,Face verification with aging using AdaBoost and local binary patterns,2010 +242,MORPH Non-Commercial,morph_nc,23.09461185,113.28788994,Sun Yat-Sen University,edu,d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,citation,https://doi.org/10.1109/LSP.2017.2661983,Modified Hidden Factor Analysis for Cross-Age Face Recognition,2017 +243,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,55bc7abcef8266d76667896bbc652d081d00f797,citation,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf,Impact of facial cosmetics on automatic gender and age estimation algorithms,2014 +244,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State University,edu,55bc7abcef8266d76667896bbc652d081d00f797,citation,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf,Impact of facial cosmetics on automatic gender and age estimation algorithms,2014 +245,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,7a65fc9e78eff3ab6062707deaadde024d2fad40,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf,A Study on Apparent Age Estimation,2015 +246,MORPH Non-Commercial,morph_nc,42.357757,-83.06286711,Wayne State University,edu,28d99dc2d673d62118658f8375b414e5192eac6f,citation,http://www.cs.wayne.edu/~mdong/cvpr17.pdf,Using Ranking-CNN for Age Estimation,2017 +247,MORPH Non-Commercial,morph_nc,37.4102193,-122.05965487,Carnegie Mellon University,edu,ec05078be14a11157ac0e1c6b430ac886124589b,citation,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,2018 +248,MORPH Non-Commercial,morph_nc,45.57022705,-122.63709346,Concordia University,edu,ec05078be14a11157ac0e1c6b430ac886124589b,citation,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,2018 +249,MORPH Non-Commercial,morph_nc,46.5190557,6.5667576,"Swiss Federal Institute of Technology Lausanne, Switzerland",edu,d7a84db2a1bf7b97657b0250f354f249394dd700,citation,https://doi.org/10.1109/ICIP.2010.5653518,Global and local feature based multi-classifier A-stack model for aging face identification,2010 +250,MORPH Non-Commercial,morph_nc,39.65404635,-79.96475355,West Virginia University,edu,d3c004125c71942846a9b32ae565c5216c068d1e,citation,http://pdfs.semanticscholar.org/d3c0/04125c71942846a9b32ae565c5216c068d1e.pdf,Recognizing Age-Separated Face Images: Humans and Machines,2014 +251,MORPH Non-Commercial,morph_nc,52.3553655,4.9501644,University of Amsterdam,edu,999289b0ef76c4c6daa16a4f42df056bf3d68377,citation,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf,The Role of Color and Contrast in Facial Age Estimation,2014 +252,MORPH Non-Commercial,morph_nc,51.99882735,4.37396037,Delft University of Technology,edu,999289b0ef76c4c6daa16a4f42df056bf3d68377,citation,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf,The Role of Color and Contrast in Facial Age Estimation,2014 +253,MORPH Non-Commercial,morph_nc,28.5456282,77.2731505,"IIIT Delhi, India",edu,f726738954e7055bb3615fa7e8f59f136d3e0bdc,citation,https://arxiv.org/pdf/1803.07385.pdf,Are you eligible? 
Predicting adulthood from face images via class specific mean autoencoder,2018 +254,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,citation,http://doi.acm.org/10.1145/2733373.2807962,What Shall I Look Like after N Years?,2015 +255,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing University,edu,b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,citation,http://doi.acm.org/10.1145/2733373.2807962,What Shall I Look Like after N Years?,2015 +256,MORPH Non-Commercial,morph_nc,45.42580475,-75.68740118,University of Ottawa,edu,16820ccfb626dcdc893cc7735784aed9f63cbb70,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf,Real-time embedded age and gender classification in unconstrained video,2015 +257,MORPH Non-Commercial,morph_nc,35.0274996,135.78154513,University of Caen,edu,0ad8149318912b5449085187eb3521786a37bc78,citation,http://arxiv.org/abs/1604.02975,CP-mtML: Coupled Projection Multi-Task Metric Learning for Large Scale Face Retrieval,2016 +258,MORPH Non-Commercial,morph_nc,51.44415765,7.26096541,Ruhr-University Bochum,edu,7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83,citation,http://pdfs.semanticscholar.org/7e1e/a2679a110241ed0dd38ff45cd4dfeb7a8e83.pdf,Extensions of Hierarchical Slow Feature Analysis for Efficient Classification and Regression on High-Dimensional Data,2017 +259,MORPH Non-Commercial,morph_nc,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,2e27667421a7eeab278e0b761db4d2c725683c3f,citation,https://doi.org/10.1007/s11042-013-1815-z,Effective human age estimation using a two-stage approach based on Lie Algebrized Gaussians feature,2013 +260,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing University,edu,0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.51,Facial Age Estimation by Learning from Label Distributions,2010 +261,MORPH Non-Commercial,morph_nc,32.0575279,118.78682252,Southeast University,edu,0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.51,Facial Age Estimation by Learning from Label Distributions,2010 +262,MORPH Non-Commercial,morph_nc,-37.78397455,144.95867433,Monash University,edu,0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,citation,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.51,Facial Age Estimation by Learning from Label Distributions,2010 +263,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +264,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +265,MORPH Non-Commercial,morph_nc,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab,citation,https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf,Quaero at TRECVID 2010: Semantic Indexing,2010 +266,MORPH Non-Commercial,morph_nc,42.718568,-84.47791571,Michigan State 
University,edu,02d650d8a3a9daaba523433fbe93705df0a7f4b1,citation,http://pdfs.semanticscholar.org/02d6/50d8a3a9daaba523433fbe93705df0a7f4b1.pdf,How Does Aging Affect Facial Components?,2012 +267,MORPH Non-Commercial,morph_nc,34.67567405,33.04577648,Cyprus University of Technology,edu,70db3a0d2ca8a797153cc68506b8650908cb0ada,citation,http://pdfs.semanticscholar.org/70db/3a0d2ca8a797153cc68506b8650908cb0ada.pdf,An Overview of Research Activities in Facial Age Estimation Using the FG-NET Aging Database,2014 +268,MORPH Non-Commercial,morph_nc,22.5447154,113.9357164,Tencent,company,a2d1818eb461564a5153c74028e53856cf0b40fd,citation,https://arxiv.org/pdf/1810.07599.pdf,Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition,2018 +269,MORPH Non-Commercial,morph_nc,57.6252103,39.8845656,Yaroslavl State University,edu,05318a267226f6d855d83e9338eaa9e718b2a8dd,citation,https://fruct.org/publications/fruct16/files/Khr.pdf,Age estimation from face images: challenging problem for audience measurement systems,2014 +270,MORPH Non-Commercial,morph_nc,41.5381124,2.4447406,"EUP Mataró, Spain",edu,1f5725a4a2eb6cdaefccbc20dccadf893936df12,citation,https://doi.org/10.1109/CCST.2012.6393544,On the relevance of age in handwritten biometric recognition,2012 +271,MORPH Non-Commercial,morph_nc,34.67567405,33.04577648,Cyprus University of Technology,edu,876583a059154def7a4bc503b21542f80859affd,citation,https://doi.org/10.1109/IWBF.2016.7449697,On the analysis of factors influencing the performance of facial age progression,2016 +272,MORPH Non-Commercial,morph_nc,-35.0636071,147.3552234,Charles Sturt University,edu,2e231f1e7e641dd3619bec59e14d02e91360ac01,citation,https://arxiv.org/pdf/1807.10421.pdf,Fusion Network for Face-Based Age Estimation,2018 +273,MORPH Non-Commercial,morph_nc,51.3791442,-2.3252332,University of Bath,edu,2e231f1e7e641dd3619bec59e14d02e91360ac01,citation,https://arxiv.org/pdf/1807.10421.pdf,Fusion Network for Face-Based Age Estimation,2018 +274,MORPH Non-Commercial,morph_nc,40.0044795,116.370238,Chinese Academy of Sciences,edu,56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298618,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,2015 +275,MORPH Non-Commercial,morph_nc,39.9041999,116.4073963,Chinese Academy of Science,edu,56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298618,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,2015 +276,MORPH Non-Commercial,morph_nc,1.2962018,103.77689944,National University of Singapore,edu,56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298618,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,2015 +277,MORPH Non-Commercial,morph_nc,32.0565957,118.77408833,Nanjing University,edu,a6e43b73f9f87588783988333997a81b4487e2d5,citation,http://pdfs.semanticscholar.org/a6e4/3b73f9f87588783988333997a81b4487e2d5.pdf,Facial Age Estimation by Total Ordering Preserving Projection,2016 +278,MORPH Non-Commercial,morph_nc,1.2988926,103.7873107,"Institution for Infocomm Research, Singapore",edu,8229f2735a0db0ad41f4d7252129311f06959907,citation,https://doi.org/10.1109/TIP.2011.2106794,Active Learning for Solving the Incomplete Data Problem in Facial Age Classification by the Furthest Nearest-Neighbor Criterion,2011 +279,MORPH 
Non-Commercial,morph_nc,1.3484104,103.68297965,Nanyang Technological University,edu,8229f2735a0db0ad41f4d7252129311f06959907,citation,https://doi.org/10.1109/TIP.2011.2106794,Active Learning for Solving the Incomplete Data Problem in Facial Age Classification by the Furthest Nearest-Neighbor Criterion,2011 +280,MORPH Non-Commercial,morph_nc,39.2899685,-76.62196103,University of Maryland,edu,963a004e208ce4bd26fa79a570af61d31651b3c3,citation,https://doi.org/10.1016/j.jvlc.2009.01.011,Computational methods for modeling facial aging: A survey,2009 +281,MORPH Non-Commercial,morph_nc,40.48256135,-3.6906079,Universidad Autonoma de Madrid,edu,4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306,Understanding the discrimination power of facial regions in forensic casework,2013 +282,MORPH Non-Commercial,morph_nc,40.4445565,-3.7122785,"Dirección General de la Guardia Civil, Madrid, Spain",edu,4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306,Understanding the discrimination power of facial regions in forensic casework,2013 +283,MORPH Non-Commercial,morph_nc,-37.8087465,144.9638875,RMIT University,edu,c49075ead6eb07ede5ada4fe372899bd0cfb83ac,citation,https://doi.org/10.1109/ICSPCS.2015.7391782,Multi-stage classification network for automatic age estimation from facial images,2015 +284,MORPH Non-Commercial,morph_nc,34.2375581,-77.9270129,University of North Carolina Wilmington,edu,00301c250d667700276b1e573640ff2fd7be574d,citation,https://doi.org/10.1109/BTAS.2014.6996242,Establishing a test set and initial comparisons for quantitatively evaluating synthetic age progression for adult aging,2014 diff --git a/site/datasets/final/msceleb.csv b/site/datasets/final/msceleb.csv new file mode 100644 index 00000000..84abaea7 --- /dev/null +++ b/site/datasets/final/msceleb.csv @@ -0,0 +1,113 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,MsCeleb,msceleb,0.0,0.0,,,291265db88023e92bb8c8e6390438e5da148e8f5,main,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,2016 +1,MsCeleb,msceleb,32.0565957,118.77408833,Nanjing University,edu,e47e8fa44decf9adbcdb02f8a64b802fe33b29ef,citation,https://doi.org/10.1109/TIP.2017.2782366,Robust Distance Metric Learning via Bayesian Inference,2018 +2,MsCeleb,msceleb,48.8476037,2.2639934,"Université Paris-Saclay, France",edu,96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450,State-of-the-art face recognition performance using publicly available software and datasets,2018 +3,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457,citation,https://pdfs.semanticscholar.org/0a03/21785c8beac1cbaaec4d8ad0cfd4a0d6d457.pdf,Learning Invariant Deep Representation for NIR-VIS Face Recognition,2017 +4,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,7a131fafa7058fb75fdca32d0529bc7cb50429bd,citation,https://arxiv.org/pdf/1704.04086.pdf,Beyond Face Rotation: Global and Local Perception GAN for Photorealistic and Identity Preserving Frontal View Synthesis,2017 +5,MsCeleb,msceleb,30.40550035,-91.18620474,Louisiana State University,edu,9f65319b8a33c8ec11da2f034731d928bf92e29d,citation,http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf,Taking Roll: 
a Pipeline for Face Recognition,2018 +6,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,809ea255d144cff780300440d0f22c96e98abd53,citation,http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf,ArcFace: Additive Angular Margin Loss for Deep Face Recognition,2018 +7,MsCeleb,msceleb,31.28473925,121.49694909,Tongji University,edu,fe0cf8eaa5a5f59225197ef1bb8613e603cd96d4,citation,https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf,Improved Face Verification with Simple Weighted Feature Combination,2017 +8,MsCeleb,msceleb,45.7835966,4.7678948,École Centrale de Lyon,edu,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +9,MsCeleb,msceleb,48.832493,2.267474,Safran Identity and Security,company,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +10,MsCeleb,msceleb,31.83907195,117.26420748,University of Science and Technology of China,edu,e1256ff535bf4c024dd62faeb2418d48674ddfa2,citation,https://arxiv.org/pdf/1803.11182.pdf,Towards Open-Set Identity Preserving Face Synthesis,2018 +11,MsCeleb,msceleb,51.7534538,-1.25400997,University of Oxford,edu,5812d8239d691e99d4108396f8c26ec0619767a6,citation,https://arxiv.org/pdf/1810.09951.pdf,GhostVLAD for set-based face recognition,2018 +12,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,0b8b8776684009e537b9e2c0d87dbd56708ddcb4,citation,http://pdfs.semanticscholar.org/0b8b/8776684009e537b9e2c0d87dbd56708ddcb4.pdf,Adversarial Discriminative Heterogeneous Face Recognition,2017 +13,MsCeleb,msceleb,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +14,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +15,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,212608e00fc1e8912ff845ee7a4a67f88ba938fc,citation,https://arxiv.org/pdf/1704.02450.pdf,Coupled Deep Learning for Heterogeneous Face Recognition,2018 +16,MsCeleb,msceleb,45.7413921,126.62552755,Harbin Institute of Technology,edu,b73795963dc623a634d218d29e4a5b74dfbc79f1,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large Ocular Region Occlusion,2018 +17,MsCeleb,msceleb,38.0333742,-84.5017758,University of Kentucky,edu,b73795963dc623a634d218d29e4a5b74dfbc79f1,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large Ocular Region Occlusion,2018 +18,MsCeleb,msceleb,34.0224149,-118.28634407,University of Southern California,edu,b73795963dc623a634d218d29e4a5b74dfbc79f1,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large Ocular Region Occlusion,2018 +19,MsCeleb,msceleb,35.6924853,139.7582533,"National Institute of Informatics, Japan",edu,102280e80470ace006e14d6ec9adda082603dea1,citation,https://arxiv.org/pdf/1804.04418.pdf,Transformation on Computer-Generated 
Facial Image to Avoid Detection by Spoofing Detector,2018 +20,MsCeleb,msceleb,55.94951105,-3.19534913,University of Edinburgh,edu,102280e80470ace006e14d6ec9adda082603dea1,citation,https://arxiv.org/pdf/1804.04418.pdf,Transformation on Computer-Generated Facial Image to Avoid Detection by Spoofing Detector,2018 +21,MsCeleb,msceleb,42.4505507,-76.4783513,Cornell University,edu,bd379f8e08f88729a9214260e05967f4ca66cd65,citation,https://arxiv.org/pdf/1711.06148.pdf,Learning Compositional Visual Concepts with Mutual Consistency,2017 +22,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +23,MsCeleb,msceleb,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +24,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,f74917fc0e55f4f5682909dcf6929abd19d33e2e,citation,http://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf,Gan Quality Index (gqi) by Gan-induced Classifier,2018 +25,MsCeleb,msceleb,40.8722825,-73.89489171,City University of New York,edu,f74917fc0e55f4f5682909dcf6929abd19d33e2e,citation,http://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf,Gan Quality Index (gqi) by Gan-induced Classifier,2018 +26,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,def2983576001bac7d6461d78451159800938112,citation,https://arxiv.org/pdf/1705.07426.pdf,The Do’s and Don’ts for CNN-Based Face Verification,2017 +27,MsCeleb,msceleb,37.4102193,-122.05965487,Carnegie Mellon University,edu,2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4,citation,https://arxiv.org/pdf/1803.00130.pdf,Ring loss: Convex Feature Normalization for Face Recognition,2018 +28,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,b6f758be954d34817d4ebaa22b30c63a4b8ddb35,citation,http://arxiv.org/abs/1703.04835,A Proximity-Aware Hierarchical Clustering of Faces,2017 +29,MsCeleb,msceleb,23.09461185,113.28788994,Sun Yat-Sen University,edu,44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,citation,http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf,Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models,2016 +30,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2011d4da646f794456bebb617d1500ddf71989ed,citation,https://pdfs.semanticscholar.org/2011/d4da646f794456bebb617d1500ddf71989ed.pdf,Transductive Centroid Projection for Semi-supervised Large-Scale Recognition,2018 +31,MsCeleb,msceleb,39.993008,116.329882,SenseTime,company,2011d4da646f794456bebb617d1500ddf71989ed,citation,https://pdfs.semanticscholar.org/2011/d4da646f794456bebb617d1500ddf71989ed.pdf,Transductive Centroid Projection for Semi-supervised Large-Scale Recognition,2018 +32,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,9e182e0cd9d70f876f1be7652c69373bcdf37fb4,citation,https://arxiv.org/pdf/1807.07860.pdf,Talking Face Generation by Adversarially Disentangled Audio-Visual Representation,2018 +33,MsCeleb,msceleb,49.2767454,-122.91777375,Simon Fraser 
University,edu,e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea,citation,https://arxiv.org/pdf/1803.06340.pdf,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,2018 +34,MsCeleb,msceleb,28.2290209,112.99483204,"National University of Defense Technology, China",edu,e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea,citation,https://arxiv.org/pdf/1803.06340.pdf,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,2018 +35,MsCeleb,msceleb,39.977217,116.337632,Microsoft Research Asia,company,e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea,citation,https://arxiv.org/pdf/1803.06340.pdf,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,2018 +36,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d80a3d1f3a438e02a6685e66ee908446766fefa9,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +37,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,4cdb6144d56098b819076a8572a664a2c2d27f72,citation,https://arxiv.org/pdf/1806.01196.pdf,Face Synthesis for Eyeglass-Robust Face Recognition,2018 +38,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,4cdb6144d56098b819076a8572a664a2c2d27f72,citation,https://arxiv.org/pdf/1806.01196.pdf,Face Synthesis for Eyeglass-Robust Face Recognition,2018 +39,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,8e0ab1b08964393e4f9f42ca037220fe98aad7ac,citation,https://arxiv.org/pdf/1712.04695.pdf,UV-GAN: Adversarial Facial UV Map Completion for Pose-invariant Face Recognition,2017 +40,MsCeleb,msceleb,41.10427915,29.02231159,Istanbul Technical University,edu,361eaef45fccfffd5b7df12fba902490a7d24a8d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404319,Robust deep learning features for face recognition under mismatched conditions,2018 +41,MsCeleb,msceleb,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7888593,Learning Neural Bag-of-Features for Large-Scale Image Retrieval,2017 +42,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,19458454308a9f56b7de76bf7d8ff8eaa52b0173,citation,https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf,Deep Features for Recognizing Disguised Faces in the Wild,0 +43,MsCeleb,msceleb,1.2962018,103.77689944,National University of Singapore,edu,c71b0ed402437470f229b3fdabb88ad044c092ea,citation,https://pdfs.semanticscholar.org/c71b/0ed402437470f229b3fdabb88ad044c092ea.pdf,Dynamic Conditional Networks for Few-Shot Learning,2018 +44,MsCeleb,msceleb,28.2290209,112.99483204,"National University of Defense Technology, China",edu,c71b0ed402437470f229b3fdabb88ad044c092ea,citation,https://pdfs.semanticscholar.org/c71b/0ed402437470f229b3fdabb88ad044c092ea.pdf,Dynamic Conditional Networks for Few-Shot Learning,2018 +45,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,99daa2839213f904e279aec7cef26c1dfb768c43,citation,https://arxiv.org/pdf/1805.02283.pdf,DocFace: Matching ID Document Photos to Selfies,2018 +46,MsCeleb,msceleb,31.30104395,121.50045497,Fudan University,edu,5a259f2f5337435f841d39dada832ab24e7b3325,citation,http://doi.acm.org/10.1145/2964284.2984059,Face Recognition via Active Annotation and Learning,2016 +47,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,5a259f2f5337435f841d39dada832ab24e7b3325,citation,http://doi.acm.org/10.1145/2964284.2984059,Face 
Recognition via Active Annotation and Learning,2016 +48,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,0a64f4fec592662316764283575d05913eb2135b,citation,http://pdfs.semanticscholar.org/0a64/f4fec592662316764283575d05913eb2135b.pdf,Joint Pixel and Feature-level Domain Adaptation in the Wild,2018 +49,MsCeleb,msceleb,37.4102193,-122.05965487,Carnegie Mellon University,edu,c71217b2b111a51a31cf1107c71d250348d1ff68,citation,https://arxiv.org/pdf/1703.09912.pdf,One Network to Solve Them All — Solving Linear Inverse Problems Using Deep Projection Models,2017 +50,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,citation,https://arxiv.org/pdf/1707.03986.pdf,Merge or Not? Learning to Group Faces via Imitation Learning,2018 +51,MsCeleb,msceleb,40.47913175,-74.43168868,Rutgers University,edu,d4448f8aa320f04066cc43201d55ddd023eb712e,citation,https://pdfs.semanticscholar.org/d444/8f8aa320f04066cc43201d55ddd023eb712e.pdf,Clothing Change Aware Person Identification,0 +52,MsCeleb,msceleb,33.9928298,-81.02685168,University of South Carolina,edu,d4448f8aa320f04066cc43201d55ddd023eb712e,citation,https://pdfs.semanticscholar.org/d444/8f8aa320f04066cc43201d55ddd023eb712e.pdf,Clothing Change Aware Person Identification,0 +53,MsCeleb,msceleb,39.94976005,116.33629046,Beijing Jiaotong University,edu,d7cbedbee06293e78661335c7dd9059c70143a28,citation,https://arxiv.org/pdf/1804.07573.pdf,MobileFaceNets: Efficient CNNs for Accurate Real-time Face Verification on Mobile Devices,2018 +54,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,40bb090a4e303f11168dce33ed992f51afe02ff7,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf,Marginal Loss for Deep Face Recognition,2017 +55,MsCeleb,msceleb,29.7207902,-95.34406271,University of Houston,edu,38d8ff137ff753f04689e6b76119a44588e143f3,citation,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf,When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition,2017 +56,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,c9efcd8e32dced6efa2bba64789df8d0a8e4996a,citation,http://dl.acm.org/citation.cfm?id=2984060,Deep Convolutional Neural Network with Independent Softmax for Large Scale Face Recognition,2016 +57,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,citation,https://doi.org/10.1109/ICIP.2017.8296394,Weakly supervised multiscale-inception learning for web-scale face recognition,2017 +58,MsCeleb,msceleb,1.3484104,103.68297965,Nanyang Technological University,edu,a322479a6851f57a3d74d017a9cb6d71395ed806,citation,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf,Towards Pose Invariant Face Recognition in the Wild,0 +59,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,a322479a6851f57a3d74d017a9cb6d71395ed806,citation,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf,Towards Pose Invariant Face Recognition in the Wild,0 +60,MsCeleb,msceleb,28.2290209,112.99483204,"National University of Defense Technology, China",edu,a322479a6851f57a3d74d017a9cb6d71395ed806,citation,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf,Towards Pose Invariant Face Recognition in the Wild,0 +61,MsCeleb,msceleb,1.2962018,103.77689944,National University of 
Singapore,edu,a322479a6851f57a3d74d017a9cb6d71395ed806,citation,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf,Towards Pose Invariant Face Recognition in the Wild,0 +62,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d949fadc9b6c5c8b067fa42265ad30945f9caa99,citation,https://arxiv.org/pdf/1710.00870.pdf,Rethinking Feature Discrimination and Polymerization for Large-scale Recognition,2017 +63,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,6fed504da4e192fe4c2d452754d23d3db4a4e5e3,citation,http://pdfs.semanticscholar.org/85ee/d639f7367c794a6d8b38619697af3efaacfe.pdf,Learning Deep Features via Congenerous Cosine Loss for Person Recognition,2017 +64,MsCeleb,msceleb,39.9586652,116.30971281,Beijing Institute of Technology,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +65,MsCeleb,msceleb,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +66,MsCeleb,msceleb,1.2962018,103.77689944,National University of Singapore,edu,0ea7b7fff090c707684fd4dc13e0a8f39b300a97,citation,http://arxiv.org/abs/1711.06055,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,2017 +67,MsCeleb,msceleb,39.65404635,-79.96475355,West Virginia University,edu,f1245d318eb3d775e101355f5f085a9bc4a0339b,citation,https://pdfs.semanticscholar.org/f124/5d318eb3d775e101355f5f085a9bc4a0339b.pdf,Face Verification with Disguise Variations via Deep Disguise,0 +68,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,3a27d164e931c422d16481916a2fa6401b74bcef,citation,https://arxiv.org/pdf/1709.03654.pdf,Anti-Makeup: Learning A Bi-Level Adversarial Network for Makeup-Invariant Face Verification,2018 +69,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,ad2cb5c255e555d9767d526721a4c7053fa2ac58,citation,https://arxiv.org/pdf/1711.03990.pdf,Longitudinal Study of Child Face Recognition,2018 +70,MsCeleb,msceleb,50.7791703,6.06728733,RWTH Aachen University,edu,f02f0f6fcd56a9b1407045de6634df15c60a85cd,citation,http://pdfs.semanticscholar.org/f02f/0f6fcd56a9b1407045de6634df15c60a85cd.pdf,Learning Low-shot facial representations via 2D warping,2017 +71,MsCeleb,msceleb,25.01682835,121.53846924,National Taiwan University,edu,17423fe480b109e1d924314c1dddb11b084e8a42,citation,https://pdfs.semanticscholar.org/1742/3fe480b109e1d924314c1dddb11b084e8a42.pdf,Deep Disguised Faces Recognition,0 +72,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,d29eec5e047560627c16803029d2eb8a4e61da75,citation,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf,Feature Transfer Learning for Deep Face Recognition with Long-Tail Data,2018 +73,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,citation,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf,Visual Data Augmentation through Learning,2018 +74,MsCeleb,msceleb,51.59029705,-0.22963221,Middlesex University,edu,9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,citation,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf,Visual Data Augmentation through Learning,2018 +75,MsCeleb,msceleb,51.7534538,-1.25400997,University of 
Oxford,edu,44e6ce12b857aeade03a6e5d1b7fb81202c39489,citation,https://arxiv.org/pdf/1806.05622.pdf,VoxCeleb2: Deep Speaker Recognition,2018 +76,MsCeleb,msceleb,23.0502042,113.39880323,South China University of Technology,edu,4f10a7697fb2a2c626d1190db2afba83c4ffe856,citation,https://pdfs.semanticscholar.org/4f10/a7697fb2a2c626d1190db2afba83c4ffe856.pdf,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,2018 +77,MsCeleb,msceleb,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,44b827df6c433ca49bcf44f9f3ebfdc0774ee952,citation,https://doi.org/10.1109/LSP.2017.2726105,Deep Correlation Feature Learning for Face Verification in the Wild,2017 +78,MsCeleb,msceleb,40.62984145,22.9588935,Aristotle University of Thessaloniki,edu,e7b2b0538731adaacb2255235e0a07d5ccf09189,citation,https://arxiv.org/pdf/1803.10837.pdf,Learning Deep Representations with Probabilistic Knowledge Transfer,2018 +79,MsCeleb,msceleb,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,46702e0127e16a4d6a1feda3ffc5f0f123957e87,citation,https://arxiv.org/pdf/1809.06131.pdf,Revisit Multinomial Logistic Regression in Deep Learning: Data Dependent Model Initialization for Image Recognition,2018 +80,MsCeleb,msceleb,51.7534538,-1.25400997,University of Oxford,edu,eb027969f9310e0ae941e2adee2d42cdf07d938c,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +81,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,feea73095b1be0cbae1ad7af8ba2c4fb6f316d35,citation,http://dl.acm.org/citation.cfm?id=3126693,Deep Face Recognition with Center Invariant Loss,2017 +82,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2296d79753118cfcd0fecefece301557f4cb66e2,citation,https://arxiv.org/pdf/1804.03487.pdf,Exploring Disentangled Feature Representation Beyond Face Identification,2018 +83,MsCeleb,msceleb,39.993008,116.329882,SenseTime,company,2296d79753118cfcd0fecefece301557f4cb66e2,citation,https://arxiv.org/pdf/1804.03487.pdf,Exploring Disentangled Feature Representation Beyond Face Identification,2018 +84,MsCeleb,msceleb,28.2290209,112.99483204,"National University of Defense Technology, China",edu,511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,citation,https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf,A Community Detection Approach to Cleaning Extremely Large Face Database,2018 +85,MsCeleb,msceleb,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,39c10888a470b92b917788c57a6fd154c97b421c,citation,https://doi.org/10.1109/VCIP.2017.8305036,Joint multi-feature fusion and attribute relationships for facial attribute prediction,2017 +86,MsCeleb,msceleb,41.70456775,-86.23822026,University of Notre Dame,edu,987a649cb33302c41412419f8eeb77048aa5513e,citation,https://arxiv.org/pdf/1803.07140.pdf,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,2018 +87,MsCeleb,msceleb,42.36782045,-71.12666653,Harvard University,edu,987a649cb33302c41412419f8eeb77048aa5513e,citation,https://arxiv.org/pdf/1803.07140.pdf,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,2018 +88,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,citation,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf,Deep Density Clustering of Unconstrained Faces,0 +89,MsCeleb,msceleb,25.0410728,121.6147562,Institute of Information 
Science,edu,337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958,citation,https://arxiv.org/pdf/1810.11160.pdf,Data-specific Adaptive Threshold for Face Recognition and Authentication,2018 +90,MsCeleb,msceleb,51.7534538,-1.25400997,University of Oxford,edu,f61d5f2a082c65d5330f21b6f36312cc4fab8a3b,citation,https://arxiv.org/pdf/1705.08841.pdf,Multi-Level Variational Autoencoder: Learning Disentangled Representations From Grouped Observations,2018 +91,MsCeleb,msceleb,42.4505507,-76.4783513,Cornell University,edu,dec0c26855da90876c405e9fd42830c3051c2f5f,citation,https://pdfs.semanticscholar.org/dec0/c26855da90876c405e9fd42830c3051c2f5f.pdf,Supplementary Material : Learning Compositional Visual Concepts with Mutual Consistency,2018 +92,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,01dfd60c0851c4e5a99176e99aa369e1b5f606b7,citation,https://arxiv.org/pdf/1809.01936.pdf,Disentangled Variational Representation for Heterogeneous Face Recognition,2018 +93,MsCeleb,msceleb,39.329053,-76.619425,Johns Hopkins University,edu,01dfd60c0851c4e5a99176e99aa369e1b5f606b7,citation,https://arxiv.org/pdf/1809.01936.pdf,Disentangled Variational Representation for Heterogeneous Face Recognition,2018 +94,MsCeleb,msceleb,32.0575279,118.78682252,Southeast University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +95,MsCeleb,msceleb,32.0565957,118.77408833,Nanjing University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +96,MsCeleb,msceleb,1.3484104,103.68297965,Nanyang Technological University,edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +97,MsCeleb,msceleb,32.87935255,-117.23110049,"University of California, San Diego",edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +98,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,citation,https://arxiv.org/pdf/1703.09507.pdf,L2-constrained Softmax Loss for Discriminative Face Verification,2017 +99,MsCeleb,msceleb,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +100,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +101,MsCeleb,msceleb,37.4102193,-122.05965487,Carnegie Mellon University,edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +102,MsCeleb,msceleb,37.43131385,-122.16936535,Stanford University,edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +103,MsCeleb,msceleb,32.87935255,-117.23110049,"University of California, San 
Diego",edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +104,MsCeleb,msceleb,37.3936717,-122.0807262,Facebook,company,628a3f027b7646f398c68a680add48c7969ab1d9,citation,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition,2017 +105,MsCeleb,msceleb,51.7534538,-1.25400997,University of Oxford,edu,313d5eba97fe064bdc1f00b7587a4b3543ef712a,citation,https://pdfs.semanticscholar.org/cb7f/93467b0ec1afd43d995e511f5d7bf052a5af.pdf,Compact Deep Aggregation for Set Retrieval,2018 +106,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,b446bcd7fb78adfe346cf7a01a38e4f43760f363,citation,http://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf,To appear in ICB 2018 Longitudinal Study of Child Face Recognition,2017 +107,MsCeleb,msceleb,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +108,MsCeleb,msceleb,22.42031295,114.20788644,Chinese University of Hong Kong,edu,831b4d8b0c0173b0bac0e328e844a0fbafae6639,citation,https://arxiv.org/pdf/1809.01407.pdf,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,2018 +109,MsCeleb,msceleb,1.3484104,103.68297965,Nanyang Technological University,edu,831b4d8b0c0173b0bac0e328e844a0fbafae6639,citation,https://arxiv.org/pdf/1809.01407.pdf,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,2018 +110,MsCeleb,msceleb,29.7207902,-95.34406271,University of Houston,edu,3cb2841302af1fb9656f144abc79d4f3d0b27380,citation,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf,When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition,2017 +111,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,citation,https://pdfs.semanticscholar.org/23dd/8d17ce09c22d367e4d62c1ccf507bcbc64da.pdf,Deep Density Clustering of Unconstrained Faces ( Supplementary Material ),2018 diff --git a/site/datasets/final/pipa.csv b/site/datasets/final/pipa.csv new file mode 100644 index 00000000..c68e70b6 --- /dev/null +++ b/site/datasets/final/pipa.csv @@ -0,0 +1,37 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,PIPA,pipa,0.0,0.0,,,0a85bdff552615643dd74646ac881862a7c7072d,main,https://doi.org/10.1109/CVPR.2015.7299113,Beyond frontal faces: Improving Person Recognition using multiple cues,2015 +1,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,bfc04ce7752fac884cf5a78b30ededfd5a0ad109,citation,https://arxiv.org/pdf/1804.04779.pdf,A Hybrid Model for Identity Obfuscation by Face Replacement,2018 +2,PIPA,pipa,28.59899755,-81.19712501,University of Central Florida,edu,2b339ece73e3787f445c5b92078e8f82c9b1c522,citation,http://pdfs.semanticscholar.org/7a2e/e06aaa3f342937225272951c0b6dd4309a7a.pdf,"Human Re-identification in Crowd Videos Using Personal, Social and Environmental Constraints",2016 +3,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,bc27434e376db89fe0e6ef2d2fabc100d2575ec6,citation,https://arxiv.org/pdf/1607.08438.pdf,Faceless Person Recognition; Privacy Implications in Social Media,2016 
+4,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,0c59071ddd33849bd431165bc2d21bbe165a81e0,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf,Person Recognition in Personal Photo Collections,2015 +5,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f,citation,https://arxiv.org/pdf/1704.06456.pdf,A Domain Based Approach to Social Relation Recognition,2017 +6,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb,citation,https://arxiv.org/pdf/1711.09001.pdf,Natural and Effective Obfuscation by Head Inpainting,2017 +7,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,808b685d09912cbef4a009e74e10476304b4cccf,citation,http://pdfs.semanticscholar.org/808b/685d09912cbef4a009e74e10476304b4cccf.pdf,From Understanding to Controlling Privacy against Automatic Person Recognition in Social Media,2017 +8,PIPA,pipa,42.4505507,-76.4783513,Cornell University,edu,0da75b0d341c8f945fae1da6c77b6ec345f47f2a,citation,https://pdfs.semanticscholar.org/0da7/5b0d341c8f945fae1da6c77b6ec345f47f2a.pdf,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People With Visual Impairments,2017 +9,PIPA,pipa,42.4505507,-76.4783513,Cornell University,edu,0aaf785d7f21d2b5ad582b456896495d30b0a4e2,citation,http://dl.acm.org/citation.cfm?id=3173789,A Face Recognition Application for People with Visual Impairments: Understanding Use Beyond the Lab,2018 +10,PIPA,pipa,22.42031295,114.20788644,Chinese University of Hong Kong,edu,c97a5f2241cc6cd99ef0c4527ea507a50841f60b,citation,https://arxiv.org/pdf/1807.10510.pdf,Person Search in Videos with One Portrait Through Visual and Temporal Links,2018 +11,PIPA,pipa,40.00229045,116.32098908,Tsinghua University,edu,c97a5f2241cc6cd99ef0c4527ea507a50841f60b,citation,https://arxiv.org/pdf/1807.10510.pdf,Person Search in Videos with One Portrait Through Visual and Temporal Links,2018 +12,PIPA,pipa,22.42031295,114.20788644,Chinese University of Hong Kong,edu,6fed504da4e192fe4c2d452754d23d3db4a4e5e3,citation,http://pdfs.semanticscholar.org/85ee/d639f7367c794a6d8b38619697af3efaacfe.pdf,Learning Deep Features via Congenerous Cosine Loss for Person Recognition,2017 +13,PIPA,pipa,-33.8809651,151.20107299,University of Technology Sydney,edu,0b84f07af44f964817675ad961def8a51406dd2e,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357,Person Re-identification in the Wild,2017 +14,PIPA,pipa,17.4454957,78.34854698,International Institute of Information Technology,edu,eb8a3948c4be0d23eb7326d27f2271be893b3409,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914701,A Probabilistic Approach to People-Centric Photo Selection and Sequencing,2017 +15,PIPA,pipa,1.3037257,103.7737763,University of Illinois’ Advanced Digital Sciences Center,edu,eb8a3948c4be0d23eb7326d27f2271be893b3409,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914701,A Probabilistic Approach to People-Centric Photo Selection and Sequencing,2017 +16,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,3e0a1884448bfd7f416c6a45dfcdfc9f2e617268,citation,https://arxiv.org/pdf/1805.05838.pdf,Understanding and Controlling User Linkability in Decentralized Learning,2018 +17,PIPA,pipa,23.09461185,113.28788994,Sun Yat-Sen University,edu,725c3605c2d26d113637097358cd4c08c19ff9e1,citation,https://arxiv.org/pdf/1807.00504.pdf,Deep 
Reasoning with Knowledge Graph for Social Relationship Understanding,2018 +18,PIPA,pipa,51.7534538,-1.25400997,University of Oxford,edu,ff1f45bdad41d8b35435098041e009627e60d208,citation,http://pdfs.semanticscholar.org/ff1f/45bdad41d8b35435098041e009627e60d208.pdf,"NAGRANI, ZISSERMAN: FROM BENEDICT CUMBERBATCH TO SHERLOCK HOLMES 1 From Benedict Cumberbatch to Sherlock Holmes: Character Identification in TV series without a Script",2017 +19,PIPA,pipa,40.742252,-74.0270949,Stevens Institute of Technology,edu,1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf,citation,http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf,A Multi-level Contextual Model for Person Recognition in Photo Albums,2016 +20,PIPA,pipa,42.4505507,-76.4783513,Cornell University,edu,269248eb8a44da5248cef840f7079b1294dbf237,citation,https://arxiv.org/pdf/1805.01515.pdf,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,2017 +21,PIPA,pipa,40.47913175,-74.43168868,Rutgers University,edu,d4448f8aa320f04066cc43201d55ddd023eb712e,citation,https://pdfs.semanticscholar.org/d444/8f8aa320f04066cc43201d55ddd023eb712e.pdf,Clothing Change Aware Person Identification,0 +22,PIPA,pipa,33.9928298,-81.02685168,University of South Carolina,edu,d4448f8aa320f04066cc43201d55ddd023eb712e,citation,https://pdfs.semanticscholar.org/d444/8f8aa320f04066cc43201d55ddd023eb712e.pdf,Clothing Change Aware Person Identification,0 +23,PIPA,pipa,39.94976005,116.33629046,Beijing Jiaotong University,edu,b5968e7bb23f5f03213178c22fd2e47af3afa04c,citation,https://arxiv.org/pdf/1705.07206.pdf,Multiple-Human Parsing in the Wild,2017 +24,PIPA,pipa,1.2962018,103.77689944,National University of Singapore,edu,b5968e7bb23f5f03213178c22fd2e47af3afa04c,citation,https://arxiv.org/pdf/1705.07206.pdf,Multiple-Human Parsing in the Wild,2017 +25,PIPA,pipa,42.4505507,-76.4783513,Cornell University,edu,537328af75f50d49696972a6c34bca97c14bc762,citation,https://arxiv.org/pdf/1805.04049.pdf,Exploiting Unintended Feature Leakage in Collaborative Learning,2018 +26,PIPA,pipa,22.42031295,114.20788644,Chinese University of Hong Kong,edu,1c9efb6c895917174ac6ccc3bae191152f90c625,citation,https://arxiv.org/pdf/1806.03084.pdf,Unifying Identification and Context Learning for Person Recognition,2018 +27,PIPA,pipa,37.21872455,-80.42542519,Virginia Polytechnic Institute and State University,edu,6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19,citation,http://pdfs.semanticscholar.org/6d8e/ef8f8d6cd8436c55018e6ca5c5907b31ac19.pdf,Understanding Representations and Reducing their Redundancy in Deep Networks,2016 +28,PIPA,pipa,17.4454957,78.34854698,International Institute of Information Technology,edu,01e27c91c7cef926389f913d12410725e7dd35ab,citation,https://doi.org/10.1007/s11760-017-1140-5,Semi-supervised annotation of faces in image collection,2018 +29,PIPA,pipa,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,b68150bfdec373ed8e025f448b7a3485c16e3201,citation,https://arxiv.org/pdf/1703.09471.pdf,Adversarial Image Perturbation for Privacy Protection A Game Theory Perspective,2017 +30,PIPA,pipa,30.284151,-97.73195598,University of Texas at Austin,edu,3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0,citation,http://pdfs.semanticscholar.org/73cc/fdedbd7d72a147925727ba1932f9488cfde3.pdf,Defeating Image Obfuscation with Deep Learning,2016 +31,PIPA,pipa,28.2290209,112.99483204,"National University of Defense Technology, China",edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans 
in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018 +32,PIPA,pipa,1.2962018,103.77689944,National University of Singapore,edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018 +33,PIPA,pipa,22.42031295,114.20788644,Chinese University of Hong Kong,edu,d949fadc9b6c5c8b067fa42265ad30945f9caa99,citation,https://arxiv.org/pdf/1710.00870.pdf,Rethinking Feature Discrimination and Polymerization for Large-scale Recognition,2017 +34,PIPA,pipa,-34.9189226,138.60423668,University of Adelaide,edu,3d24b386d003bee176a942c26336dbe8f427aadd,citation,http://arxiv.org/abs/1611.09967,Sequential Person Recognition in Photo Albums with a Recurrent Network,2017 +35,PIPA,pipa,42.4505507,-76.4783513,Cornell University,edu,8bdf6f03bde08c424c214188b35be8b2dec7cdea,citation,https://arxiv.org/pdf/1805.04049.pdf,Inference Attacks Against Collaborative Learning,2018 diff --git a/site/datasets/final/umd_faces.csv b/site/datasets/final/umd_faces.csv new file mode 100644 index 00000000..53788401 --- /dev/null +++ b/site/datasets/final/umd_faces.csv @@ -0,0 +1,34 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,UMD,umd_faces,0.0,0.0,,,31b05f65405534a696a847dd19c621b7b8588263,main,http://arxiv.org/abs/1611.01484,UMDFaces: An annotated face dataset for training deep networks,2017 +1,UMD,umd_faces,39.2899685,-76.62196103,University of Maryland,edu,19458454308a9f56b7de76bf7d8ff8eaa52b0173,citation,https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf,Deep Features for Recognizing Disguised Faces in the Wild,0 +2,UMD,umd_faces,28.2290209,112.99483204,"National University of Defense Technology, China",edu,511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,citation,https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf,A Community Detection Approach to Cleaning Extremely Large Face Database,2018 +3,UMD,umd_faces,25.01682835,121.53846924,National Taiwan University,edu,81884e1de00e59f24bc20254584d73a1a1806933,citation,https://arxiv.org/pdf/1811.02328.pdf,Super-Identity Convolutional Neural Network for Face Hallucination,2018 +4,UMD,umd_faces,39.993008,116.329882,SenseTime,company,81884e1de00e59f24bc20254584d73a1a1806933,citation,https://arxiv.org/pdf/1811.02328.pdf,Super-Identity Convolutional Neural Network for Face Hallucination,2018 +5,UMD,umd_faces,30.284151,-97.73195598,University of Texas at Austin,edu,81884e1de00e59f24bc20254584d73a1a1806933,citation,https://arxiv.org/pdf/1811.02328.pdf,Super-Identity Convolutional Neural Network for Face Hallucination,2018 +6,UMD,umd_faces,45.7835966,4.7678948,École Centrale de Lyon,edu,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +7,UMD,umd_faces,48.832493,2.267474,Safran Identity and Security,company,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +8,UMD,umd_faces,41.70456775,-86.23822026,University of Notre Dame,edu,73ea06787925157df519a15ee01cc3dc1982a7e0,citation,https://arxiv.org/pdf/1811.01474.pdf,Fast Face Image Synthesis with Minimal Training,2018 +9,UMD,umd_faces,30.40550035,-91.18620474,Louisiana State 
University,edu,9f65319b8a33c8ec11da2f034731d928bf92e29d,citation,http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf,Taking Roll: a Pipeline for Face Recognition,2018 +10,UMD,umd_faces,51.24303255,-0.59001382,University of Surrey,edu,ed07856461da6c7afa4f1782b5b607b45eebe9f6,citation,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf,D Morphable Models as Spatial Transformer Networks,2017 +11,UMD,umd_faces,53.94540365,-1.03138878,University of York,edu,ed07856461da6c7afa4f1782b5b607b45eebe9f6,citation,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf,D Morphable Models as Spatial Transformer Networks,2017 +12,UMD,umd_faces,53.94540365,-1.03138878,University of York,edu,6a4419ce2338ea30a570cf45624741b754fa52cb,citation,https://arxiv.org/pdf/1804.02541.pdf,Statistical transformer networks: learning shape and appearance models via self supervision,2018 +13,UMD,umd_faces,51.49887085,-0.17560797,Imperial College London,edu,809ea255d144cff780300440d0f22c96e98abd53,citation,http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf,ArcFace: Additive Angular Margin Loss for Deep Face Recognition,2018 +14,UMD,umd_faces,39.2899685,-76.62196103,University of Maryland,edu,def2983576001bac7d6461d78451159800938112,citation,https://arxiv.org/pdf/1705.07426.pdf,The Do’s and Don’ts for CNN-Based Face Verification,2017 +15,UMD,umd_faces,43.7776426,11.259765,University of Florence,edu,746c0205fdf191a737df7af000eaec9409ede73f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119,Investigating Nuisances in DCNN-Based Face Recognition,2018 +16,UMD,umd_faces,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +17,UMD,umd_faces,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +18,UMD,umd_faces,51.24303255,-0.59001382,University of Surrey,edu,c146aa6d56233ce700032f1cb179700778557601,citation,https://arxiv.org/pdf/1708.07199.pdf,3D Morphable Models as Spatial Transformer Networks,2017 +19,UMD,umd_faces,53.94540365,-1.03138878,University of York,edu,c146aa6d56233ce700032f1cb179700778557601,citation,https://arxiv.org/pdf/1708.07199.pdf,3D Morphable Models as Spatial Transformer Networks,2017 +20,UMD,umd_faces,25.01682835,121.53846924,National Taiwan University,edu,17423fe480b109e1d924314c1dddb11b084e8a42,citation,https://pdfs.semanticscholar.org/1742/3fe480b109e1d924314c1dddb11b084e8a42.pdf,Deep Disguised Faces Recognition,0 +21,UMD,umd_faces,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,7a131fafa7058fb75fdca32d0529bc7cb50429bd,citation,https://arxiv.org/pdf/1704.04086.pdf,Beyond Face Rotation: Global and Local Perception GAN for Photorealistic and Identity Preserving Frontal View Synthesis,2017 +22,UMD,umd_faces,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +23,UMD,umd_faces,30.274084,120.15507,Alibaba,company,89497854eada7e32f06aa8f3c0ceedc0e91ecfef,citation,https://doi.org/10.1109/TIP.2017.2784571,Deep Context-Sensitive Facial 
Landmark Detection With Tree-Structured Modeling,2018 +24,UMD,umd_faces,30.19331415,120.11930822,Zhejiang University,edu,89497854eada7e32f06aa8f3c0ceedc0e91ecfef,citation,https://doi.org/10.1109/TIP.2017.2784571,Deep Context-Sensitive Facial Landmark Detection With Tree-Structured Modeling,2018 +25,UMD,umd_faces,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,d4f1eb008eb80595bcfdac368e23ae9754e1e745,citation,https://arxiv.org/pdf/1708.02337.pdf,Unconstrained Face Detection and Open-Set Face Recognition Challenge,2017 +26,UMD,umd_faces,51.7534538,-1.25400997,University of Oxford,edu,eb027969f9310e0ae941e2adee2d42cdf07d938c,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +27,UMD,umd_faces,51.49887085,-0.17560797,Imperial College London,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +28,UMD,umd_faces,51.24303255,-0.59001382,University of Surrey,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +29,UMD,umd_faces,41.70456775,-86.23822026,University of Notre Dame,edu,e64c166dc5bb33bc61462a8b5ac92edb24d905a1,citation,https://arxiv.org/pdf/1811.01474.pdf,Fast Face Image Synthesis with Minimal Training.,2018 +30,UMD,umd_faces,51.7534538,-1.25400997,University of Oxford,edu,70c59dc3470ae867016f6ab0e008ac8ba03774a1,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +31,UMD,umd_faces,38.99203005,-76.9461029,University of Maryland College Park,edu,3d2891950f1b76f783a9ba77b3c55b8e68b95fbe,citation,https://arxiv.org/pdf/1802.06713.pdf,Disentangling 3D Pose in a Dendritic CNN for Unconstrained 2D Face Alignment,2018 +32,UMD,umd_faces,51.49887085,-0.17560797,Imperial College London,edu,1929863fff917ee7f6dc428fc1ce732777668eca,citation,https://arxiv.org/pdf/1712.04695.pdf,UV-GAN: Adversarial Facial UV Map Completion for Pose-Invariant Face Recognition,2018 diff --git a/site/datasets/final/voc.csv b/site/datasets/final/voc.csv new file mode 100644 index 00000000..9400d7d6 --- /dev/null +++ b/site/datasets/final/voc.csv @@ -0,0 +1,401 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,VOC,voc,0.0,0.0,,,abe9f3b91fd26fa1b50cd685c0d20debfb372f73,main,http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf,The Pascal Visual Object Classes Challenge: A Retrospective,2014 +1,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,ed2f711cf9bcd9d7ab039d746af109ed9573421a,citation,https://pdfs.semanticscholar.org/ed2f/711cf9bcd9d7ab039d746af109ed9573421a.pdf,Pixel-Wise Classification Method for High Resolution Remote Sensing Imagery Using Deep Neural Networks,2018 +2,VOC,voc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,ed2f711cf9bcd9d7ab039d746af109ed9573421a,citation,https://pdfs.semanticscholar.org/ed2f/711cf9bcd9d7ab039d746af109ed9573421a.pdf,Pixel-Wise Classification Method for High Resolution Remote Sensing Imagery Using Deep Neural Networks,2018 +3,VOC,voc,13.0222347,77.56718325,Indian Institute of Science Bangalore,edu,a6ac6463b5c89ac9eb013c978f213b309cc6a5c7,citation,https://arxiv.org/pdf/1808.01134.pdf,iSPA-Net: Iterative Semantic Pose Alignment Network,2018 
+4,VOC,voc,42.3583961,-71.09567788,MIT,edu,aaf4d938f2e66d158d5e635a9c1d279cdc7639c0,citation,http://pdfs.semanticscholar.org/aaf4/d938f2e66d158d5e635a9c1d279cdc7639c0.pdf,Toward visual understanding of everyday object,2015 +5,VOC,voc,42.2942142,-83.71003894,University of Michigan,edu,74dbcc09a3456ddacf5cece640b84045ebdf6be1,citation,https://arxiv.org/pdf/1810.05162.pdf,Characterizing Adversarial Examples Based on Spatial Consistency Information for Semantic Segmentation,2018 +6,VOC,voc,49.2767454,-122.91777375,Simon Fraser University,edu,74dbcc09a3456ddacf5cece640b84045ebdf6be1,citation,https://arxiv.org/pdf/1810.05162.pdf,Characterizing Adversarial Examples Based on Spatial Consistency Information for Semantic Segmentation,2018 +7,VOC,voc,46.109237,7.08453549,IDIAP Research Institute,edu,dedc7b080b8e13d72f8dc33e248e7637d191fdbf,citation,http://pdfs.semanticscholar.org/dedc/7b080b8e13d72f8dc33e248e7637d191fdbf.pdf,Beyond Dataset Bias: Multi-task Unaligned Shared Knowledge Transfer,2012 +8,VOC,voc,52.17638955,0.14308882,University of Cambridge,edu,dedc7b080b8e13d72f8dc33e248e7637d191fdbf,citation,http://pdfs.semanticscholar.org/dedc/7b080b8e13d72f8dc33e248e7637d191fdbf.pdf,Beyond Dataset Bias: Multi-task Unaligned Shared Knowledge Transfer,2012 +9,VOC,voc,39.00041165,-77.10327775,National Institutes of Health,edu,18c57ddc9c0164ee792661f43a5578f7a00d0330,citation,https://arxiv.org/pdf/1705.02315v2.pdf,ChestX-Ray8: Hospital-Scale Chest X-Ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases,2017 +10,VOC,voc,37.40253645,-122.11655107,Toyota Research Institute,edu,a825680aeb853fc34c65b5844c4c4391148f18c3,citation,https://arxiv.org/pdf/1711.10006.pdf,SSD-6D: Making RGB-Based 3D Detection and 6D Pose Estimation Great Again,2017 +11,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,f249c266321d661ae398c26ddb8c7409f6455ba1,citation,https://pdfs.semanticscholar.org/f249/c266321d661ae398c26ddb8c7409f6455ba1.pdf,Revisiting Faster R-CNN: A Deeper Look at Region Proposal Network,2017 +12,VOC,voc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,7fa5ede4a34dbe604ce317d529eed78db6642bc0,citation,https://arxiv.org/pdf/1709.01829.pdf,Soft Proposal Networks for Weakly Supervised Object Localization,2017 +13,VOC,voc,35.9990522,-78.9290629,Duke University,edu,7fa5ede4a34dbe604ce317d529eed78db6642bc0,citation,https://arxiv.org/pdf/1709.01829.pdf,Soft Proposal Networks for Weakly Supervised Object Localization,2017 +14,VOC,voc,42.3583961,-71.09567788,MIT,edu,05fdd29536d55fe3ad00689b6f60ada8bc761e91,citation,http://people.csail.mit.edu/torralba/publications/ihog_iccv.pdf,HOGgles: Visualizing Object Detection Features,2013 +15,VOC,voc,24.7925484,120.9951183,National Tsing Hua University,edu,394bf41cd8578ec10cd34452c688c3e3de1c16a7,citation,https://pdfs.semanticscholar.org/394b/f41cd8578ec10cd34452c688c3e3de1c16a7.pdf,Multi-view to Novel View: Synthesizing Novel Views With Self-learned Confidence,2018 +16,VOC,voc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,2453dd38cde21f3248b55d281405f11d58168fa9,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.342,Multi-scale Patch Aggregation (MPA) for Simultaneous Detection and Segmentation,2016 +17,VOC,voc,50.7791703,6.06728733,RWTH Aachen University,edu,ccb9ffa26b28dffc4f7d613821d1a9f0d60ea3f4,citation,https://arxiv.org/pdf/1706.09364.pdf,Online Adaptation of Convolutional Neural Networks for Video Object Segmentation,2017 +18,VOC,voc,39.87549675,32.78553506,Middle East 
Technical University,edu,d38af10096aa90dfccd7e4cec9757900bf6958bd,citation,https://arxiv.org/pdf/1807.04067.pdf,MultiPoseNet: Fast Multi-Person Pose Estimation Using Pose Residual Network,2018 +19,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,8c1e828a4826a1fb3eb47ee432f5333b974fa141,citation,http://pdfs.semanticscholar.org/8c1e/828a4826a1fb3eb47ee432f5333b974fa141.pdf,Spatial Graph for Image Classification,2012 +20,VOC,voc,38.88140235,121.52281098,Dalian University of Technology,edu,2a31b4bf2a294b6e67956a6cd5ed6d875af548e0,citation,https://arxiv.org/pdf/1710.01020.pdf,Learning Affinity via Spatial Propagation Networks,2017 +21,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,0790c400bfe6fbefe88ef7791476e1abf1952089,citation,https://arxiv.org/pdf/1511.04067v1.pdf,Deep Gaussian Conditional Random Field Network: A Model-Based Deep Network for Discriminative Denoising,2016 +22,VOC,voc,41.3868913,2.16352385,University of Barcelona,edu,442cf9b24661c9ea5c2a1dcabd4a5b8af1cd89da,citation,https://arxiv.org/pdf/1806.10805.pdf,Beyond One-hot Encoding: lower dimensional target embedding,2018 +23,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,04eda7eee3e0282de50e54554f50870dd17defa1,citation,https://arxiv.org/pdf/1705.08280v1.pdf,How Hard Can It Be? Estimating the Difficulty of Visual Search in an Image,2016 +24,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,90a4125974564a5ab6c2ce2ff685fc36e9cf0680,citation,https://arxiv.org/pdf/1703.08448.pdf,Object Region Mining with Adversarial Erasing: A Simple Classification to Semantic Segmentation Approach,2017 +25,VOC,voc,39.94976005,116.33629046,Beijing Jiaotong University,edu,90a4125974564a5ab6c2ce2ff685fc36e9cf0680,citation,https://arxiv.org/pdf/1703.08448.pdf,Object Region Mining with Adversarial Erasing: A Simple Classification to Semantic Segmentation Approach,2017 +26,VOC,voc,39.9922379,116.30393816,Peking University,edu,c3dd6c1ddbb9cfcc1bed6383ffaa0b1ce4d13625,citation,https://arxiv.org/pdf/1807.01544.pdf,TextSnake: A Flexible Representation for Detecting Text of Arbitrary Shapes,2018 +27,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,2976605dc3b73377696537291d45f09f1ab1fbf5,citation,http://www.ri.cmu.edu/pub_files/2016/6/multi-task.pdf,Cross-Stitch Networks for Multi-task Learning,2016 +28,VOC,voc,28.54632595,77.27325504,Indian Institute of Technology Delhi,edu,25e9a2ec45c34d4610359196dc505a72c3833336,citation,http://pdfs.semanticscholar.org/25e9/a2ec45c34d4610359196dc505a72c3833336.pdf,Benchmarking KAZE and MCM for Multiclass Classification,2015 +29,VOC,voc,39.9808333,116.34101249,Beihang University,edu,935e639bebf905af2e35e8b1e7aa0538d7122185,citation,https://arxiv.org/pdf/1808.00313.pdf,A Network Structure to Explicitly Reduce Confusion Errors in Semantic Segmentation,2018 +30,VOC,voc,39.8011499,140.0459116,Akita Prefectural University,edu,211435a4e14d00f4aaed191acfb548185ee800b9,citation,http://pdfs.semanticscholar.org/2114/35a4e14d00f4aaed191acfb548185ee800b9.pdf,Visual Saliency Based Multiple Objects Segmentation and its Parallel Implementation for Real-Time Vision Processing,2015 +31,VOC,voc,49.25839375,-123.24658161,University of British Columbia,edu,9fae24003bbedecdb617f9779215d79d06b90dd8,citation,https://arxiv.org/pdf/1807.09856.pdf,Where Are the Blobs: Counting by Localization with Point Supervision,2018 +32,VOC,voc,40.72925325,-73.99625394,New York University,edu,c45681fa9d9c36a6a196017ef283ac38904f91bb,citation,https://arxiv.org/pdf/1711.07377.pdf,Pixel-wise 
object tracking,2017 +33,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,45f858f9e8d7713f60f52618e54089ba68dfcd6d,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Sigurdsson_What_Actions_Are_ICCV_2017_paper.pdf,What Actions are Needed for Understanding Human Actions in Videos?,2017 +34,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,57bd01c042a5f64659b3a9f91c048b8594f762f6,citation,http://pdfs.semanticscholar.org/57bd/01c042a5f64659b3a9f91c048b8594f762f6.pdf,Advances in fine-grained visual categorization,2015 +35,VOC,voc,31.30104395,121.50045497,Fudan University,edu,9716416a15e79a36e3481bcdad79cdc905603e6d,citation,https://arxiv.org/pdf/1808.07016.pdf,Gaussian Word Embedding with a Wasserstein Distance Loss,2017 +36,VOC,voc,32.0565957,118.77408833,Nanjing University,edu,97265d64859e06900c11ae5bb5f03f3bd265f858,citation,https://arxiv.org/pdf/1612.01082.pdf,Multilabel Image Classification With Regional Latent Semantic Dependencies,2018 +37,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,97265d64859e06900c11ae5bb5f03f3bd265f858,citation,https://arxiv.org/pdf/1612.01082.pdf,Multilabel Image Classification With Regional Latent Semantic Dependencies,2018 +38,VOC,voc,-33.8809651,151.20107299,University of Technology Sydney,edu,97265d64859e06900c11ae5bb5f03f3bd265f858,citation,https://arxiv.org/pdf/1612.01082.pdf,Multilabel Image Classification With Regional Latent Semantic Dependencies,2018 +39,VOC,voc,42.3583961,-71.09567788,MIT,edu,a19904e76b5ded44e6aeb9af85997d160de6bb22,citation,http://pdfs.semanticscholar.org/a199/04e76b5ded44e6aeb9af85997d160de6bb22.pdf,TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation,2018 +40,VOC,voc,47.05821,15.46019568,Graz University of Technology,edu,96a9ca7a8366ae0efe6b58a515d15b44776faf6e,citation,https://arxiv.org/pdf/1609.00129.pdf,Grid Loss: Detecting Occluded Faces,2016 +41,VOC,voc,47.05821,15.46019568,Graz University of Technology,edu,513b8dc73a9fbc467e1ac130fe8c842b5839ca51,citation,http://pdfs.semanticscholar.org/513b/8dc73a9fbc467e1ac130fe8c842b5839ca51.pdf,Dissertation Scalable Visual Navigation for Micro Aerial Vehicles using Geometric Prior Knowledge,2013 +42,VOC,voc,37.8687126,-122.25586815,"University of California, Berkeley",edu,0ee3aa2a78f9680bb65a823bd9195c879572ec1c,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dubey_What_Makes_an_ICCV_2015_paper.pdf,What Makes an Object Memorable?,2015 +43,VOC,voc,42.3583961,-71.09567788,MIT,edu,0ee3aa2a78f9680bb65a823bd9195c879572ec1c,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dubey_What_Makes_an_ICCV_2015_paper.pdf,What Makes an Object Memorable?,2015 +44,VOC,voc,37.36566745,-120.42158888,"University of California, Merced",edu,0ee3aa2a78f9680bb65a823bd9195c879572ec1c,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dubey_What_Makes_an_ICCV_2015_paper.pdf,What Makes an Object Memorable?,2015 +45,VOC,voc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,a776acc53591c3eb0b53501d9758d984e2e52a97,citation,https://arxiv.org/pdf/1804.00880.pdf,Weakly Supervised Instance Segmentation using Class Peak Response,2018 +46,VOC,voc,35.9990522,-78.9290629,Duke University,edu,a776acc53591c3eb0b53501d9758d984e2e52a97,citation,https://arxiv.org/pdf/1804.00880.pdf,Weakly Supervised Instance Segmentation using Class Peak Response,2018 +47,VOC,voc,1.2962018,103.77689944,National University of 
Singapore,edu,423b941641728a21e37f41359a691815cdd84ceb,citation,http://arxiv.org/abs/1511.04517,Reversible Recursive Instance-Level Object Segmentation,2016 +48,VOC,voc,47.6423318,-122.1369302,Microsoft,company,666939690c564641b864eed0d60a410b31e49f80,citation,http://pdfs.semanticscholar.org/6669/39690c564641b864eed0d60a410b31e49f80.pdf,What Visual Attributes Characterize an Object Class?,2014 +49,VOC,voc,43.7776426,11.259765,University of Florence,edu,51e8e8c4cac8260ef21c25f9f2a0a68aedbc6d58,citation,https://arxiv.org/pdf/1704.02518.pdf,Deep Generative Adversarial Compression Artifact Removal,2017 +50,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,3b01a839d174dad6f2635cff7ebe7e1aaad701a4,citation,http://pdfs.semanticscholar.org/3b01/a839d174dad6f2635cff7ebe7e1aaad701a4.pdf,Image Co-localization by Mimicking a Good Detector's Confidence Score Distribution,2016 +51,VOC,voc,31.83907195,117.26420748,University of Science and Technology of China,edu,d467035d83fb4e86c4a47b2ca87894388deb8c44,citation,https://pdfs.semanticscholar.org/d467/035d83fb4e86c4a47b2ca87894388deb8c44.pdf,Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Object Detection,2016 +52,VOC,voc,30.284151,-97.73195598,University of Texas at Austin,edu,264a2b946fae4af23c646cc08fc56947b5be82cf,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301302,Robust object recognition in RGB-D egocentric videos based on Sparse Affine Hull Kernel,2015 +53,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,480888bad59b314236f2d947ebf308ae146c98e4,citation,https://arxiv.org/pdf/1511.06881.pdf,Zoom Better to See Clearer: Human and Object Parsing with Hierarchical Auto-Zoom Net,2016 +54,VOC,voc,25.01682835,121.53846924,National Taiwan University,edu,a1ee55d529e04a80f4eae3b30d0961a985a64fa4,citation,http://www.cs.utexas.edu/~ycsu/publications/mm029-su.pdf,Enabling low bitrate mobile visual recognition: a performance versus bandwidth evaluation,2013 +55,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,0cd736baf31dceea1cc39ac72e00b65587f5fb9e,citation,http://pdfs.semanticscholar.org/4ad0/b6f189718a7287c6e7b90eb05331e56db334.pdf,Learning Hash Functions Using Column Generation,2013 +56,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,6424574cb92b316928c37232869bfadcb5b4c20f,citation,https://arxiv.org/pdf/1711.05282.pdf,C-WSL: Count-Guided Weakly Supervised Localization,2018 +57,VOC,voc,47.6543238,-122.30800894,University of Washington,edu,51eba481dac6b229a7490f650dff7b17ce05df73,citation,http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf,Situation Recognition: Visual Semantic Role Labeling for Image Understanding,2016 +58,VOC,voc,47.3764534,8.54770931,ETH Zürich,edu,961a5d5750f18e91e28a767b3cb234a77aac8305,citation,http://pdfs.semanticscholar.org/961a/5d5750f18e91e28a767b3cb234a77aac8305.pdf,Face Detection without Bells and Whistles,2014 +59,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,0c05f60998628884a9ac60116453f1a91bcd9dda,citation,http://pdfs.semanticscholar.org/7b19/80d4ac1730fd0145202a8cb125bf05d96f01.pdf,Optimizing Open-Ended Crowdsourcing: The Next Frontier in Crowdsourced Data Management,2016 +60,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,efa2aacb0fbee857015fad1dba72767f56be6f39,citation,https://pdfs.semanticscholar.org/efa2/aacb0fbee857015fad1dba72767f56be6f39.pdf,Aggregating Crowdsourced Image Segmentations,2018 
+61,VOC,voc,37.3936717,-122.0807262,Facebook,company,efa2aacb0fbee857015fad1dba72767f56be6f39,citation,https://pdfs.semanticscholar.org/efa2/aacb0fbee857015fad1dba72767f56be6f39.pdf,Aggregating Crowdsourced Image Segmentations,2018 +62,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,17113b0f647ce05b2e50d1d40c856370f94da7de,citation,http://pdfs.semanticscholar.org/1711/3b0f647ce05b2e50d1d40c856370f94da7de.pdf,Zoom Better to See Clearer: Human Part Segmentation with Auto Zoom Net,2015 +63,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,549d55a06c5402696e063ce36b411f341a64f8a9,citation,http://arxiv.org/pdf/1511.06078v1.pdf,Learning Deep Structure-Preserving Image-Text Embeddings,2016 +64,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,549d55a06c5402696e063ce36b411f341a64f8a9,citation,http://arxiv.org/pdf/1511.06078v1.pdf,Learning Deep Structure-Preserving Image-Text Embeddings,2016 +65,VOC,voc,35.9020448,139.93622009,University of Tokyo,edu,44bfa5311f0921664e9036f63cadd71049a35f35,citation,https://pdfs.semanticscholar.org/44bf/a5311f0921664e9036f63cadd71049a35f35.pdf,Faster R-CNN-Based Glomerular Detection in Multistained Human Whole Slide Images,2018 +66,VOC,voc,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,133f1f2679892d408420d8092283539010723359,citation,http://arxiv.org/pdf/1502.05082v3.pdf,What Makes for Effective Detection Proposals?,2016 +67,VOC,voc,60.18558755,24.8242733,Aalto University,edu,98d04187f091f402a90a6a9a2108393ca5f91563,citation,https://arxiv.org/pdf/1807.09828.pdf,ADVIO: An Authentic Dataset for Visual-Inertial Odometry,2018 +68,VOC,voc,61.44964205,23.85877462,Tampere University of Technology,edu,98d04187f091f402a90a6a9a2108393ca5f91563,citation,https://arxiv.org/pdf/1807.09828.pdf,ADVIO: An Authentic Dataset for Visual-Inertial Odometry,2018 +69,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,f8015e31d1421f6aee5e17fc3907070b8e0a5e59,citation,http://pdfs.semanticscholar.org/f801/5e31d1421f6aee5e17fc3907070b8e0a5e59.pdf,Towards Usable Multimedia Event Detection from Web Videos,2016 +70,VOC,voc,34.0224149,-118.28634407,University of Southern California,edu,6b9e8acef979c13fa9ecc8fe9b635b312fedbcbe,citation,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chang_Multiple_Structured-Instance_Learning_2014_CVPR_paper.pdf,Multiple Structured-Instance Learning for Semantic Segmentation with Uncertain Training Data,2014 +71,VOC,voc,51.4584837,-2.6097752,University of Bristol,edu,72fd97d21d6465d4bb407b6f8f3accd4419a2fb4,citation,https://pdfs.semanticscholar.org/384a/ea88ffd79295c99bcb80552f8655dbb87509.pdf,Automated Identification of Individual Great White Sharks from Unrestricted Fin Imagery,2015 +72,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,62b83bf64f200ebb9fa16dfb7108b85e390b2207,citation,https://arxiv.org/pdf/1807.11236.pdf,Semantic Labeling in Very High Resolution Images via a Self-Cascaded Convolutional Neural Network,2018 +73,VOC,voc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,62b83bf64f200ebb9fa16dfb7108b85e390b2207,citation,https://arxiv.org/pdf/1807.11236.pdf,Semantic Labeling in Very High Resolution Images via a Self-Cascaded Convolutional Neural Network,2018 +74,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,2577211aeaaa1f2245ddc379564813bee3d46c06,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Misra_Seeing_Through_the_CVPR_2016_paper.pdf,Seeing through the 
Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,2016 +75,VOC,voc,47.6423318,-122.1369302,Microsoft,company,2577211aeaaa1f2245ddc379564813bee3d46c06,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Misra_Seeing_Through_the_CVPR_2016_paper.pdf,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,2016 +76,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,3900fb44902396f94fb070be41199b4beecc9081,citation,https://arxiv.org/pdf/1612.02101.pdf,Bottom-Up Top-Down Cues for Weakly-Supervised Semantic Segmentation,2017 +77,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,32c45df9e11e6751bcea1b928f398f6c134d22c6,citation,http://pdfs.semanticscholar.org/32c4/5df9e11e6751bcea1b928f398f6c134d22c6.pdf,Towards Unified Object Detection and Semantic Segmentation,2014 +78,VOC,voc,42.36782045,-71.12666653,Harvard University,edu,2bcd59835528c583bb5b310522a5ba6e99c58b15,citation,http://pdfs.semanticscholar.org/c0ef/596a212d0e40c79c6760673fe122e517b43c.pdf,Multi-class Open Set Recognition Using Probability of Inclusion,2014 +79,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,3920a205990abc7883c70cc96a0410a2d056c2a8,citation,http://groups.inf.ed.ac.uk/calvin/Publications/papazoglouICCV2013-camera-ready.pdf,Fast Object Segmentation in Unconstrained Video,2013 +80,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,b6810adcfd507b2e019ebc8afe4f44f953faf946,citation,https://pdfs.semanticscholar.org/b681/0adcfd507b2e019ebc8afe4f44f953faf946.pdf,ML-LocNet: Improving Object Localization with Multi-view Learning Network,2018 +81,VOC,voc,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,b6810adcfd507b2e019ebc8afe4f44f953faf946,citation,https://pdfs.semanticscholar.org/b681/0adcfd507b2e019ebc8afe4f44f953faf946.pdf,ML-LocNet: Improving Object Localization with Multi-view Learning Network,2018 +82,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,0e08cf0b19f0600dadce0f6694420d643ea9828b,citation,http://openaccess.thecvf.com/content_iccv_2015/papers/Humayun_The_Middle_Child_ICCV_2015_paper.pdf,The Middle Child Problem: Revisiting Parametric Min-Cut and Seeds for Object Proposals,2015 +83,VOC,voc,45.5198289,-122.67797964,Oregon State University,edu,0e08cf0b19f0600dadce0f6694420d643ea9828b,citation,http://openaccess.thecvf.com/content_iccv_2015/papers/Humayun_The_Middle_Child_ICCV_2015_paper.pdf,The Middle Child Problem: Revisiting Parametric Min-Cut and Seeds for Object Proposals,2015 +84,VOC,voc,30.19331415,120.11930822,Zhejiang University,edu,81bf7a4b8b3c21d42cb82f946f762c94031e11b8,citation,https://pdfs.semanticscholar.org/81bf/7a4b8b3c21d42cb82f946f762c94031e11b8.pdf,Segmentation of Nerve on Ultrasound Images Using Deep Adversarial Network,2017 +85,VOC,voc,52.4107358,-4.05295501,Aberystwyth University,edu,30d8fbb9345cdf1096635af7d39a9b04af9b72f9,citation,https://pdfs.semanticscholar.org/30d8/fbb9345cdf1096635af7d39a9b04af9b72f9.pdf,Watching plants grow - a position paper on computer vision and Arabidopsis thaliana,2017 +86,VOC,voc,43.66333345,-79.39769975,University of Toronto,edu,87204e4e1a96b8f59cb91828199dacd192292231,citation,http://pdfs.semanticscholar.org/8720/4e4e1a96b8f59cb91828199dacd192292231.pdf,Towards Real-Time Detection and Tracking of Basketball Players using Deep Neural Networks,2017 +87,VOC,voc,40.00229045,116.32098908,Tsinghua 
University,edu,30a4637cbc461838c151073b265fb08e00492ff4,citation,http://faculty.ucmerced.edu/mhyang/papers/cvpr16_object_localization.pdf,Weakly Supervised Object Localization with Progressive Domain Adaptation,2016 +88,VOC,voc,50.7338124,7.1022465,University of Bonn,edu,606cfdcc43203351dbb944a3bb3719695e557e37,citation,https://pdfs.semanticscholar.org/606c/fdcc43203351dbb944a3bb3719695e557e37.pdf,Ex Paucis Plura : Learning Affordance Segmentation from Very Few Examples,2018 +89,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,47b6cd69c0746688f6e17b37d73fa12422826dbc,citation,http://pdfs.semanticscholar.org/47b6/cd69c0746688f6e17b37d73fa12422826dbc.pdf,Self corrective Perturbations for Semantic Segmentation and Classification,2017 +90,VOC,voc,38.99203005,-76.9461029,University of Maryland College Park,edu,47b6cd69c0746688f6e17b37d73fa12422826dbc,citation,http://pdfs.semanticscholar.org/47b6/cd69c0746688f6e17b37d73fa12422826dbc.pdf,Self corrective Perturbations for Semantic Segmentation and Classification,2017 +91,VOC,voc,42.8298248,-73.87719385,GE Global Research Center,edu,47b6cd69c0746688f6e17b37d73fa12422826dbc,citation,http://pdfs.semanticscholar.org/47b6/cd69c0746688f6e17b37d73fa12422826dbc.pdf,Self corrective Perturbations for Semantic Segmentation and Classification,2017 +92,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,14421119527aa5882e1552a651fbd2d73bc94637,citation,http://pdfs.semanticscholar.org/9b81/86b6bc1e05d7a473d2afebc8a12698d88691.pdf,Searching for objects driven by context,2012 +93,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,14421119527aa5882e1552a651fbd2d73bc94637,citation,http://pdfs.semanticscholar.org/9b81/86b6bc1e05d7a473d2afebc8a12698d88691.pdf,Searching for objects driven by context,2012 +94,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,3410a1489d04ec6fcfbb3d76d39055117931ccf0,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.126,Learning Collections of Part Models for Object Recognition,2013 +95,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,69b647afe6526256a93033eac14ce470204e7bae,citation,http://pdfs.semanticscholar.org/d7dd/4fb9074db71ebf9155d64b439102d4c7b0c5.pdf,Training Deep Neural Networks via Direct Loss Minimization,2016 +96,VOC,voc,43.66333345,-79.39769975,University of Toronto,edu,69b647afe6526256a93033eac14ce470204e7bae,citation,http://pdfs.semanticscholar.org/d7dd/4fb9074db71ebf9155d64b439102d4c7b0c5.pdf,Training Deep Neural Networks via Direct Loss Minimization,2016 +97,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,81825711c2aaa1b9d3ead1a300e71c4353a41382,citation,https://arxiv.org/pdf/1607.03476.pdf,End-to-end training of object class detectors for mean average precision,2016 +98,VOC,voc,39.993008,116.329882,SenseTime,company,2ce073da76e6ed87eda2da08da0e00f4f060f1a6,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.78,Deep Saliency with Encoded Low Level Distance Map and High Level Features,2016 +99,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,2313c827d3cb9a291b6a00d015c29580862bbdcc,citation,https://arxiv.org/pdf/1808.03575.pdf,Weakly- and Semi-supervised Panoptic Segmentation,2018 +100,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,839a2155995acc0a053a326e283be12068b35cb8,citation,http://pdfs.semanticscholar.org/839a/2155995acc0a053a326e283be12068b35cb8.pdf,Handcrafted Local Features are Convolutional Neural Networks,2015 +101,VOC,voc,32.0565957,118.77408833,Nanjing 
University,edu,634e02d6107529d672cbbdf5b97990966e289829,citation,https://arxiv.org/pdf/1802.05394.pdf,Cost-Effective Training of Deep CNNs with Active Model Adaptation,2018 +102,VOC,voc,56.45796755,-2.98214831,University of Dundee,edu,d0137881f6c791997337b9cc7f1efbd61977270d,citation,http://pdfs.semanticscholar.org/d013/7881f6c791997337b9cc7f1efbd61977270d.pdf,"University of Dundee An automated pattern recognition system for classifying indirect immunofluorescence images for HEp-2 cells and specimens Manivannan,",2016 +103,VOC,voc,42.2942142,-83.71003894,University of Michigan,edu,ed173a39f4cd980eef319116b6ba39cec1b37c42,citation,https://arxiv.org/pdf/1611.05424.pdf,Associative Embedding: End-to-End Learning for Joint Detection and Grouping,2017 +104,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,ed173a39f4cd980eef319116b6ba39cec1b37c42,citation,https://arxiv.org/pdf/1611.05424.pdf,Associative Embedding: End-to-End Learning for Joint Detection and Grouping,2017 +105,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,84cf838be40e2ab05732fbefbb93ccb2afb0cb48,citation,http://pdfs.semanticscholar.org/84cf/838be40e2ab05732fbefbb93ccb2afb0cb48.pdf,Recognizing Handwritten Characters,2016 +106,VOC,voc,37.26728,126.9841151,Seoul National University,edu,b082f440ee91e2751701401919584203b37e1e1a,citation,https://pdfs.semanticscholar.org/303c/28f1ba643a7cd88255cc379e79052fb7e7b1.pdf,SeedNet : Automatic Seed Generation with Deep Reinforcement Learning for Robust Interactive Segmentation,2018 +107,VOC,voc,22.2081469,114.25964115,University of Hong Kong,edu,6008213e4270e88cb414459de759c961469b92dd,citation,https://arxiv.org/pdf/1802.09129.pdf,"Multi-Evidence Filtering and Fusion for Multi-Label Classification, Object Detection and Semantic Segmentation Based on Weakly Supervised Learning",2018 +108,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,90b4470032f2796a347a0080bcd833c2db0e8bf0,citation,https://arxiv.org/pdf/1807.07760.pdf,Improving Image Clustering With Multiple Pretrained CNN Feature Extractors,2018 +109,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,beecaf2d6e9d102b6b2459ea38e15179a4b55ffd,citation,https://arxiv.org/pdf/1611.09587.pdf,Surveillance Video Parsing with Single Frame Supervision,2017 +110,VOC,voc,41.3868913,2.16352385,University of Barcelona,edu,0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277,citation,http://pdfs.semanticscholar.org/0fb8/317a8bf5feaf297af8e9b94c50c5ed0e8277.pdf,Detecting Hands in Egocentric Videos: Towards Action Recognition,2017 +111,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,0e0179eb4b43016691f0f1473a08089dda21f8f0,citation,http://pdfs.semanticscholar.org/0e01/79eb4b43016691f0f1473a08089dda21f8f0.pdf,The Art of Detection,2016 +112,VOC,voc,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,135c957f6a80f250507c7707479e584c288f430f,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.498,Image-Based Synthesis and Re-synthesis of Viewpoints Guided by 3D Models,2014 +113,VOC,voc,39.00041165,-77.10327775,National Institutes of Health,edu,c72b063e23b8b45b57a42ebc2f9714297c539a6f,citation,https://arxiv.org/pdf/1801.04334.pdf,TieNet: Text-Image Embedding Network for Common Thorax Disease Classification and Reporting in Chest X-rays,2018 +114,VOC,voc,36.05238585,140.11852361,National Institute of Advanced Industrial Science and Technology,edu,061ffd3967540424ac4e4066f4a605d8318bab90,citation,https://staff.aist.go.jp/takumi.kobayashi/publication/2014/CVPR2014.pdf,Dirichlet-Based Histogram Feature 
Transform for Image Classification,2014 +115,VOC,voc,42.3583961,-71.09567788,MIT,edu,1a2e9a56e5f71bf95a2f68b6e67e2aaa1c6bf91e,citation,http://pdfs.semanticscholar.org/1a2e/9a56e5f71bf95a2f68b6e67e2aaa1c6bf91e.pdf,FPM: Fine Pose Parts-Based Model with 3D CAD Models,2014 +116,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,c6f58adf4a5ee8499cbc9b9bc1e6f1c39f1f8eae,citation,https://pdfs.semanticscholar.org/c6f5/8adf4a5ee8499cbc9b9bc1e6f1c39f1f8eae.pdf,Earn to P Ay a Ttention,2018 +117,VOC,voc,32.87935255,-117.23110049,"University of California, San Diego",edu,3c8db2ca155ce4e15ec8a2c4c4b979de654fb296,citation,http://pages.ucsd.edu/~ztu/publication/iccv15_hed.pdf,Holistically-Nested Edge Detection,2015 +118,VOC,voc,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,8ccd6aaf1ee4b66c13fffbf560e3920f9bdf5f10,citation,http://pdfs.semanticscholar.org/8ccd/6aaf1ee4b66c13fffbf560e3920f9bdf5f10.pdf,A multitask deep learning model for real-time deployment in embedded systems,2017 +119,VOC,voc,53.5238572,-113.52282665,University of Alberta,edu,b4f5cf797a1c857f32e5740d53d9990bc925af2b,citation,https://pdfs.semanticscholar.org/b4f5/cf797a1c857f32e5740d53d9990bc925af2b.pdf,Review of Segmentation with Deep Learning and Discover Its Application in Ultrasound Images,2018 +120,VOC,voc,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,3bad18554678ab46bbbf9de41d36423bc8083c83,citation,http://arxiv.org/pdf/1511.07803v1.pdf,Weakly Supervised Object Boundaries,2016 +121,VOC,voc,24.7925484,120.9951183,National Tsing Hua University,edu,07191c2047b5b643dd72a0583c1d537ba59f977a,citation,http://pdfs.semanticscholar.org/0719/1c2047b5b643dd72a0583c1d537ba59f977a.pdf,Interactive Segmentation from 1-Bit Feedback,2016 +122,VOC,voc,37.26728,126.9841151,Seoul National University,edu,ae6e8851dfd9c97e37e1cbd61b21cc54d5e2b9c7,citation,https://arxiv.org/pdf/1802.04977.pdf,Paraphrasing Complex Network: Network Compression via Factor Transfer,2018 +123,VOC,voc,37.26728,126.9841151,Seoul National University,edu,5375a3344017d9502ebb4170325435de3da1fa16,citation,https://doi.org/10.1007/978-3-642-37447-0,Computer Vision – ACCV 2012,2012 +124,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,5375a3344017d9502ebb4170325435de3da1fa16,citation,https://doi.org/10.1007/978-3-642-37447-0,Computer Vision – ACCV 2012,2012 +125,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,5375a3344017d9502ebb4170325435de3da1fa16,citation,https://doi.org/10.1007/978-3-642-37447-0,Computer Vision – ACCV 2012,2012 +126,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,fdfd57d4721174eba288e501c0c120ad076cdca8,citation,https://arxiv.org/pdf/1704.07129.pdf,An Analysis of Action Recognition Datasets for Language and Vision Tasks,2017 +127,VOC,voc,32.0565957,118.77408833,Nanjing University,edu,ec83c63e28ae2a658bc76a6750e078c3a54b9760,citation,https://arxiv.org/pdf/1705.02758.pdf,Deep Descriptor Transforming for Image Co-Localization,2017 +128,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,ec83c63e28ae2a658bc76a6750e078c3a54b9760,citation,https://arxiv.org/pdf/1705.02758.pdf,Deep Descriptor Transforming for Image Co-Localization,2017 +129,VOC,voc,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,b1177aad0db8bd6b605ffe0d68addaf97b1f9a6b,citation,https://pdfs.semanticscholar.org/5035/733022916db7e5965c565327e169da1e2f39.pdf,Visual Representations and Models: From Latent SVM to Deep Learning,2016 +130,VOC,voc,31.83907195,117.26420748,University of 
Science and Technology of China,edu,a5ae7d662ed086bc5b0c9a2c1dc54fcb23635000,citation,https://pdfs.semanticscholar.org/a5ae/7d662ed086bc5b0c9a2c1dc54fcb23635000.pdf,Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,2016 +131,VOC,voc,22.53521465,113.9315911,Shenzhen University,edu,a5ae7d662ed086bc5b0c9a2c1dc54fcb23635000,citation,https://pdfs.semanticscholar.org/a5ae/7d662ed086bc5b0c9a2c1dc54fcb23635000.pdf,Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,2016 +132,VOC,voc,53.38522185,-6.25740874,Dublin City University,edu,9528e2e8c20517ab916f803c0371abb4f0ed488b,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pan_Shallow_and_Deep_CVPR_2016_paper.pdf,Shallow and Deep Convolutional Networks for Saliency Prediction,2016 +133,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,e2272f50ffa33b8e41509e4b795ad5a4eb27bb46,citation,https://arxiv.org/pdf/1607.07671.pdf,Region-based semantic segmentation with end-to-end training,2016 +134,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,b8d61dc56a4112e0317c6a7323417ee649476148,citation,https://arxiv.org/pdf/1807.05636.pdf,Cross Pixel Optical Flow Similarity for Self-Supervised Learning,2018 +135,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,db0a4af734dab1854c2e8dfe499fe0e353226e45,citation,https://pdfs.semanticscholar.org/db0a/4af734dab1854c2e8dfe499fe0e353226e45.pdf,Hot Anchors: A Heuristic Anchors Sampling Method in RCNN-Based Object Detection,2018 +136,VOC,voc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,db0a4af734dab1854c2e8dfe499fe0e353226e45,citation,https://pdfs.semanticscholar.org/db0a/4af734dab1854c2e8dfe499fe0e353226e45.pdf,Hot Anchors: A Heuristic Anchors Sampling Method in RCNN-Based Object Detection,2018 +137,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,ffe0f43206169deef3a2bf64cec90fe35bb1a8e5,citation,http://pdfs.semanticscholar.org/ffe0/f43206169deef3a2bf64cec90fe35bb1a8e5.pdf,"Automated Processing of Imaging Data through Multi-tiered Classification of Biological Structures Illustrated Using Caenorhabditis elegans +",2015 +138,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,ffe0f43206169deef3a2bf64cec90fe35bb1a8e5,citation,http://pdfs.semanticscholar.org/ffe0/f43206169deef3a2bf64cec90fe35bb1a8e5.pdf,"Automated Processing of Imaging Data through Multi-tiered Classification of Biological Structures Illustrated Using Caenorhabditis elegans +",2015 +139,VOC,voc,45.77445695,126.67684917,Harbin Engineering University,edu,479eb6579194d4d944671dfe5e90b122ca4b58fd,citation,https://pdfs.semanticscholar.org/479e/b6579194d4d944671dfe5e90b122ca4b58fd.pdf,Structural inference embedded adversarial networks for scene parsing,2018 +140,VOC,voc,34.2469152,108.91061982,Northwestern Polytechnical University,edu,479eb6579194d4d944671dfe5e90b122ca4b58fd,citation,https://pdfs.semanticscholar.org/479e/b6579194d4d944671dfe5e90b122ca4b58fd.pdf,Structural inference embedded adversarial networks for scene parsing,2018 +141,VOC,voc,1.29500195,103.84909214,Singapore Management University,edu,d289ce63055c10937e5715e940a4bb9d0af7a8c5,citation,http://dl.acm.org/citation.cfm?id=3081360,DeepMon: Mobile GPU-based Deep Learning Framework for Continuous Vision Applications,2017 +142,VOC,voc,60.18558755,24.8242733,Aalto University,edu,061bba574c7c2ef0ba9de91afc4fcab70feddd4f,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.272,Paying Attention to 
Descriptions Generated by Image Captioning Models,2017 +143,VOC,voc,28.59899755,-81.19712501,University of Central Florida,edu,061bba574c7c2ef0ba9de91afc4fcab70feddd4f,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.272,Paying Attention to Descriptions Generated by Image Captioning Models,2017 +144,VOC,voc,34.7275714,135.2371,Kobe University,edu,ee2217f9d22d6a18aaf97f05768035c38305d1fa,citation,https://doi.org/10.1109/APSIPA.2015.7415501,Detection of facial parts via deformable part model using part annotation,2015 +145,VOC,voc,50.7791703,6.06728733,RWTH Aachen University,edu,18219d85bb14f851fc4714df19cc7f38dff8ddc3,citation,http://pdfs.semanticscholar.org/1821/9d85bb14f851fc4714df19cc7f38dff8ddc3.pdf,Online Adaptation of Convolutional Neural Networks for the 2017 DAVIS Challenge on Video Object Segmentation,2017 +146,VOC,voc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,da44881db32c132eb9cdef524618e3c8ed340b47,citation,https://arxiv.org/pdf/1802.00383.pdf,Annotation-Free and One-Shot Learning for Instance Segmentation of Homogeneous Object Clusters,2018 +147,VOC,voc,50.7338124,7.1022465,University of Bonn,edu,cc94b423c298003f0f164e63e63177d443291a77,citation,https://arxiv.org/pdf/1805.03994.pdf,Multi-View Semantic Labeling of 3D Point Clouds for Automated Plant Phenotyping,2018 +148,VOC,voc,39.9922379,116.30393816,Peking University,edu,83a811fd947415df2413d15386dbc558f07595cb,citation,https://arxiv.org/pdf/1709.08295.pdf,Fine-grained Discriminative Localization via Saliency-guided Faster R-CNN,2017 +149,VOC,voc,-33.8809651,151.20107299,University of Technology Sydney,edu,3a5f5aca6138abcf22ede1af5572e01eb0f761d1,citation,https://pdfs.semanticscholar.org/3a5f/5aca6138abcf22ede1af5572e01eb0f761d1.pdf,Optimizing Multivariate Performance Measures from Multi-View Data,2016 +150,VOC,voc,34.2469152,108.91061982,Northwestern Polytechnical University,edu,ce300b006f42c1b64ca0e53d1cf28d11a98ece8f,citation,https://pdfs.semanticscholar.org/ce30/0b006f42c1b64ca0e53d1cf28d11a98ece8f.pdf,Learning Multi-Instance Enriched Image Representations via Non-Greedy Ratio Maximization of the l 1-Norm Distances,0 +151,VOC,voc,34.0224149,-118.28634407,University of Southern California,edu,71b038958df0b7855fc7b8b8e7dcde8537a7c1ad,citation,http://pdfs.semanticscholar.org/71b0/38958df0b7855fc7b8b8e7dcde8537a7c1ad.pdf,Kernel Methods for Unsupervised Domain Adaptation by Boqing Gong,2015 +152,VOC,voc,34.2469152,108.91061982,Northwestern Polytechnical University,edu,af7cab9b4a2a2a565a3efe0a226c517f47289077,citation,https://arxiv.org/pdf/1803.10910.pdf,Deep Unsupervised Saliency Detection: A Multiple Noisy Labeling Perspective,2018 +153,VOC,voc,-35.2776999,149.118527,Australian National University,edu,af7cab9b4a2a2a565a3efe0a226c517f47289077,citation,https://arxiv.org/pdf/1803.10910.pdf,Deep Unsupervised Saliency Detection: A Multiple Noisy Labeling Perspective,2018 +154,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,3a6ebdfb6375093885e846153a48139ef1ecfae6,citation,http://arxiv.org/abs/1411.7466,The treasure beneath convolutional layers: Cross-convolutional-layer pooling for image classification,2015 +155,VOC,voc,51.24303255,-0.59001382,University of Surrey,edu,a7e9d230bc44dfbe56757f3025d5b4caa49032f3,citation,http://pdfs.semanticscholar.org/a7e9/d230bc44dfbe56757f3025d5b4caa49032f3.pdf,Unity in Diversity: Discovering Topics from Words - Information Theoretic Co-clustering for Visual Categorization,2012 +156,VOC,voc,37.5557271,127.0436642,Hanyang 
University,edu,50137d663802224e683951c48970496b38b02141,citation,http://pdfs.semanticscholar.org/5013/7d663802224e683951c48970496b38b02141.pdf,DETRAC: A New Benchmark and Protocol for Multi-Object Tracking,2015 +157,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,07de8371ad4901356145722aa29abaeafd0986b9,citation,http://pdfs.semanticscholar.org/07de/8371ad4901356145722aa29abaeafd0986b9.pdf,Towards Usable Multimedia Event Detection,2017 +158,VOC,voc,41.21002475,-73.80407056,IBM Thomas J. Watson Research Center,company,af386bb1b5e8c9f65b3ae836198a93aa860d6331,citation,https://arxiv.org/pdf/1805.04574.pdf,Revisiting Dilated Convolution: A Simple Approach for Weakly- and Semi- Supervised Semantic Segmentation,2018 +159,VOC,voc,17.4454957,78.34854698,International Institute of Information Technology,edu,d6b1b0e60e1764982ef95d4ade8fcaa10bfb156a,citation,http://pdfs.semanticscholar.org/d6b1/b0e60e1764982ef95d4ade8fcaa10bfb156a.pdf,A Sketch-based Approach for Multimedia Retrieval,2016 +160,VOC,voc,51.49887085,-0.17560797,Imperial College London,edu,37b3637dab65b91a5c91bb6a583e69c448823cc1,citation,https://arxiv.org/pdf/1705.05994.pdf,Learning a Hierarchical Latent-Variable Model of 3D Shapes,2018 +161,VOC,voc,39.9574,-75.19026706,Drexel University,edu,83d16fb8f53156c9e2b28d75abb6532af515440f,citation,http://pdfs.semanticscholar.org/83d1/6fb8f53156c9e2b28d75abb6532af515440f.pdf,Large-scale Document Labeling using Supervised Sequence Embedding,2012 +162,VOC,voc,45.51181205,-122.68492999,Portland State University,edu,05e45f61dc7577c50114a382abc6e952ae24cdac,citation,https://pdfs.semanticscholar.org/05e4/5f61dc7577c50114a382abc6e952ae24cdac.pdf,"Object Detection and Recognition in Natural Settings by George William Dittmar A thesis submitted in partial fulfilment of the requirements of the degree Master of Science in Computer Science Thesis Committee: Melanie Mitchell, Chair",2012 +163,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,192235f5a9e4c9d6a28ec0d333e36f294b32f764,citation,http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf,Reconfiguring the Imaging Pipeline for Computer Vision,2017 +164,VOC,voc,42.4505507,-76.4783513,Cornell University,edu,192235f5a9e4c9d6a28ec0d333e36f294b32f764,citation,http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf,Reconfiguring the Imaging Pipeline for Computer Vision,2017 +165,VOC,voc,50.0764296,14.41802312,Czech Technical University,edu,bd4f2e7a196c0d6033a49390ee8836f4f551b7c8,citation,http://rrc.cvc.uab.es/files/Robust-Reading-Competition-Karatzas.pdf,ICDAR 2015 competition on Robust Reading,2015 +166,VOC,voc,33.59914655,130.22359848,Kyushu University,edu,bd4f2e7a196c0d6033a49390ee8836f4f551b7c8,citation,http://rrc.cvc.uab.es/files/Robust-Reading-Competition-Karatzas.pdf,ICDAR 2015 competition on Robust Reading,2015 +167,VOC,voc,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,3d5575e9ba02128d94c20330f4525fc816411ec2,citation,https://arxiv.org/pdf/1612.02646.pdf,Learning Video Object Segmentation from Static Images,2017 +168,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,78f62042bfb3bb49ba10e142d118a9bb058b2a19,citation,http://pdfs.semanticscholar.org/78f6/2042bfb3bb49ba10e142d118a9bb058b2a19.pdf,WebSeg: Learning Semantic Segmentation from Web Searches,2018 +169,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,0c7aac75ccd17d696cff2e1ce95db0493f5c18a2,citation,https://arxiv.org/pdf/1809.01123.pdf,VideoMatch: Matching Based Video Object Segmentation,2018 
+170,VOC,voc,3.12267405,101.65356103,University of Malaya,edu,6c78add400f749c897dc3eb93996eda1c796e91c,citation,https://arxiv.org/pdf/1410.3752.pdf,Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding,2014 +171,VOC,voc,51.49887085,-0.17560797,Imperial College London,edu,6c78add400f749c897dc3eb93996eda1c796e91c,citation,https://arxiv.org/pdf/1410.3752.pdf,Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding,2014 +172,VOC,voc,39.9922379,116.30393816,Peking University,edu,6c78add400f749c897dc3eb93996eda1c796e91c,citation,https://arxiv.org/pdf/1410.3752.pdf,Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding,2014 +173,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,b61c0b11b1c25958d202b4f7ca772e1d95ee1037,citation,http://pdfs.semanticscholar.org/b61c/0b11b1c25958d202b4f7ca772e1d95ee1037.pdf,Bridging Category-level and Instance-level Semantic Image Segmentation,2016 +174,VOC,voc,34.0224149,-118.28634407,University of Southern California,edu,79894ddf290d3c7a768d634eceb7888564b5cf19,citation,https://arxiv.org/pdf/1708.01676.pdf,Query-Guided Regression Network with Context Policy for Phrase Grounding,2017 +175,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,fec2a5a06a3aab5efe923a78d208ec747d5e4894,citation,https://arxiv.org/pdf/1805.12018.pdf,Generalizing to Unseen Domains via Adversarial Data Augmentation,2018 +176,VOC,voc,31.30104395,121.50045497,Fudan University,edu,5ac63895a7d3371a739d066bb1631fc178d8276a,citation,http://doi.acm.org/10.1145/3123266.3123379,Learning Semantic Feature Map for Visual Content Recognition,2017 +177,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,5ac63895a7d3371a739d066bb1631fc178d8276a,citation,http://doi.acm.org/10.1145/3123266.3123379,Learning Semantic Feature Map for Visual Content Recognition,2017 +178,VOC,voc,-34.40505545,150.87834655,University of Wollongong,edu,4e559f23bcf502c752f2938ad7f0182047b8d1e4,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Wang_A_Fast_Approximate_2013_CVPR_paper.pdf,A Fast Approximate AIB Algorithm for Distributional Word Clustering,2013 +179,VOC,voc,-35.2776999,149.118527,Australian National University,edu,7536b6a9f3cb4ae810e2ef6d0219134b4e546dd0,citation,http://pdfs.semanticscholar.org/7536/b6a9f3cb4ae810e2ef6d0219134b4e546dd0.pdf,Semi-Automatic Image Labelling Using Depth Information,2015 +180,VOC,voc,42.7298459,-73.67950216,Rensselaer Polytechnic Institute,edu,11b89011298e193d9e6a1d99302221c1d8645bda,citation,http://openaccess.thecvf.com/content_iccv_2015/papers/Gao_Structured_Feature_Selection_ICCV_2015_paper.pdf,Structured Feature Selection,2015 +181,VOC,voc,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,de3245c795bc50ebdb5d929c8da664341238264a,citation,https://arxiv.org/pdf/1705.08590.pdf,Generative Model With Coordinate Metric Learning for Object Recognition Based on 3D Models,2018 +182,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,cc2eaa182f33defbb33d69e9547630aab7ed9c9c,citation,http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,2015 +183,VOC,voc,40.00471095,-83.02859368,Ohio State University,edu,cc2eaa182f33defbb33d69e9547630aab7ed9c9c,citation,http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,2015 
+184,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,cc2eaa182f33defbb33d69e9547630aab7ed9c9c,citation,http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,2015 +185,VOC,voc,32.0565957,118.77408833,Nanjing University,edu,9c71e6f4e27b3a6f0f872ec683b0f6dfe0966c05,citation,http://pdfs.semanticscholar.org/9c71/e6f4e27b3a6f0f872ec683b0f6dfe0966c05.pdf,"Latent Dirichlet Allocation (LDA) and Topic modeling: models, applications, a survey",2017 +186,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,b88b83d2ffd30bf3bc3be3fb7492fd88f633b2fe,citation,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a827.pdf,Subcategory-Aware Object Classification,2013 +187,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,b6a3802075d460093977f8566c451f950edf7a47,citation,https://pdfs.semanticscholar.org/0999/e5baf505eed0df8e2661c29354f3757b3399.pdf,Facilitating and Exploring Planar Homogeneous Texture for Indoor Scene Understanding,2016 +188,VOC,voc,51.7555205,-1.2261597,Oxford Brookes University,edu,cd6cab9357f333ad9966abc76f830c190a1b7911,citation,https://pdfs.semanticscholar.org/cd6c/ab9357f333ad9966abc76f830c190a1b7911.pdf,"Recognition, reorganisation, reconstruction and reinteraction for scene understanding",2014 +189,VOC,voc,47.3764534,8.54770931,ETH Zürich,edu,0fe8b5503681128da84a8454a4cc94470adc09ea,citation,http://pdfs.semanticscholar.org/b96a/0ccae1d15cffe3b479b2c56d9132b05cd846.pdf,Sparsity Potentials for Detecting Objects with the Hough Transform,2012 +190,VOC,voc,35.7036227,51.35125097,Sharif University of Technology,edu,0fe8b5503681128da84a8454a4cc94470adc09ea,citation,http://pdfs.semanticscholar.org/b96a/0ccae1d15cffe3b479b2c56d9132b05cd846.pdf,Sparsity Potentials for Detecting Objects with the Hough Transform,2012 +191,VOC,voc,47.6423318,-122.1369302,Microsoft,company,9bbc952adb3e3c6091d45d800e806d3373a52bac,citation,https://pdfs.semanticscholar.org/9bbc/952adb3e3c6091d45d800e806d3373a52bac.pdf,Learning Visual Classifiers using Human-centric Annotations,2015 +192,VOC,voc,35.6572957,139.54255868,University of Electro-Communications,edu,6e209d7d33c0be8afae863f4e4e9c3e86826711f,citation,http://img.cs.uec.ac.jp/pub/conf16/161204shimok_1_ppt.pdf,Weakly-supervised segmentation by combining CNN feature maps and object saliency maps,2016 +193,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,46d85e1dc7057bef62647bd9241601e9896a1b02,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_040_ext.pdf,Improving object proposals with multi-thresholding straddling expansion,2015 +194,VOC,voc,35.2742655,137.01327841,Chubu University,edu,67e3fac91c699c085d47774990572d8ccdc36f15,citation,http://pdfs.semanticscholar.org/67e3/fac91c699c085d47774990572d8ccdc36f15.pdf,Multiple Skip Connections and Dilated Convolutions for Semantic Segmentation,2017 +195,VOC,voc,34.0224149,-118.28634407,University of Southern California,edu,a4f29217d2120ed1490aea7e1c5b78c3b76e972f,citation,https://arxiv.org/pdf/1610.06907.pdf,Enhanced object detection via fusion with prior beliefs from image classification,2017 +196,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,f2d07a77711a8d74bbfa48a0436dae18a698b05a,citation,http://pdfs.semanticscholar.org/f2d0/7a77711a8d74bbfa48a0436dae18a698b05a.pdf,Composite Statistical Learning and Inference for Semantic Segmentation,2014 
+197,VOC,voc,40.2075951,-8.42566148,University of Coimbra,edu,f2d07a77711a8d74bbfa48a0436dae18a698b05a,citation,http://pdfs.semanticscholar.org/f2d0/7a77711a8d74bbfa48a0436dae18a698b05a.pdf,Composite Statistical Learning and Inference for Semantic Segmentation,2014 +198,VOC,voc,55.7039571,13.1902011,Lund University,edu,f2d07a77711a8d74bbfa48a0436dae18a698b05a,citation,http://pdfs.semanticscholar.org/f2d0/7a77711a8d74bbfa48a0436dae18a698b05a.pdf,Composite Statistical Learning and Inference for Semantic Segmentation,2014 +199,VOC,voc,61.44964205,23.85877462,Tampere University of Technology,edu,ff11cb09e409996020a2dc3a8afc3b535e6b2482,citation,https://arxiv.org/pdf/1807.03142.pdf,Faster Bounding Box Annotation for Object Detection in Indoor Scenes,2018 +200,VOC,voc,35.84658875,127.1350133,Chonbuk National University,edu,e103fa24d7fa297cd206b22b3bf670bfda6c65c4,citation,https://pdfs.semanticscholar.org/e103/fa24d7fa297cd206b22b3bf670bfda6c65c4.pdf,Object Detection in Very High-Resolution Aerial Images Using One-Stage Densely Connected Feature Pyramid Network,2018 +201,VOC,voc,41.8268682,-71.40123146,Brown University,edu,9a781a01b5a9c210dd2d27db8b73b7d62bc64837,citation,http://pdfs.semanticscholar.org/9a78/1a01b5a9c210dd2d27db8b73b7d62bc64837.pdf,An Attempt to Build Object Detection Models by Reusing Parts,2013 +202,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,ac559888f996923c06b1cf90db6b57b12e582289,citation,http://pdfs.semanticscholar.org/ac55/9888f996923c06b1cf90db6b57b12e582289.pdf,Benchmarking neuromorphic vision: lessons learnt from computer vision,2015 +203,VOC,voc,47.3764534,8.54770931,ETH Zürich,edu,ac559888f996923c06b1cf90db6b57b12e582289,citation,http://pdfs.semanticscholar.org/ac55/9888f996923c06b1cf90db6b57b12e582289.pdf,Benchmarking neuromorphic vision: lessons learnt from computer vision,2015 +204,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,ac559888f996923c06b1cf90db6b57b12e582289,citation,http://pdfs.semanticscholar.org/ac55/9888f996923c06b1cf90db6b57b12e582289.pdf,Benchmarking neuromorphic vision: lessons learnt from computer vision,2015 +205,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,2a4fc35acaf09517e9c63821cadd428a84832416,citation,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00905.pdf,Learning object class detectors from weakly annotated video,2012 +206,VOC,voc,22.053565,113.39913285,Jilin University,edu,cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,citation,http://pdfs.semanticscholar.org/cd48/50de71e4e858be5f5e6ef7f48d5bf7decea6.pdf,Distribution Entropy Boosted VLAD for Image Retrieval,2016 +207,VOC,voc,40.4319722,-86.92389368,Purdue University,edu,34b925a111ba29f73f5c0d1b363f357958d563c1,citation,https://www.microsoft.com/en-us/research/wp-content/uploads/2015/03/Shoaib_DATE_2015.pdf,SAPPHIRE: An always-on context-aware computer vision system for portable devices,2015 +208,VOC,voc,47.6423318,-122.1369302,Microsoft,company,34b925a111ba29f73f5c0d1b363f357958d563c1,citation,https://www.microsoft.com/en-us/research/wp-content/uploads/2015/03/Shoaib_DATE_2015.pdf,SAPPHIRE: An always-on context-aware computer vision system for portable devices,2015 +209,VOC,voc,24.7925484,120.9951183,National Tsing Hua University,edu,c76b611a986a2e09df22603d93b2d9125aaff369,citation,https://arxiv.org/pdf/1810.07050.pdf,Generating Self-Guided Dense Annotations for Weakly Supervised Semantic Segmentation,2018 +210,VOC,voc,22.053565,113.39913285,Jilin 
University,edu,1927d01b6b9acf865401b544e25b62a7ddbac5fa,citation,https://pdfs.semanticscholar.org/1927/d01b6b9acf865401b544e25b62a7ddbac5fa.pdf,An Enhanced Region Proposal Network for object detection using deep learning method,2018 +211,VOC,voc,-33.8809651,151.20107299,University of Technology Sydney,edu,1ecd20f7fc34344e396825d27bc5a9871ab0d0c2,citation,https://arxiv.org/pdf/1810.09091.pdf,SG-One: Similarity Guidance Network for One-Shot Semantic Segmentation,2018 +212,VOC,voc,42.3583961,-71.09567788,MIT,edu,26aa0aff1ea1baf848a521363cc455044690e090,citation,http://pdfs.semanticscholar.org/26aa/0aff1ea1baf848a521363cc455044690e090.pdf,A 2D + 3D Rich Data Approach to Scene Understanding,2013 +213,VOC,voc,46.0658836,11.1159894,University of Trento,edu,3548cb9ee54bd4c8b3421f1edd393da9038da293,citation,http://www.huppelen.nl/publications/2012cvprUnseenEventCompositionality.pdf,(Unseen) event recognition via semantic compositionality,2012 +214,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,25ee08db14dca641d085584909b551042618b8bf,citation,http://pdfs.semanticscholar.org/25ee/08db14dca641d085584909b551042618b8bf.pdf,Learning to Segment Instances in Videos with Spatial Propagation Network,2017 +215,VOC,voc,37.36566745,-120.42158888,"University of California, Merced",edu,25ee08db14dca641d085584909b551042618b8bf,citation,http://pdfs.semanticscholar.org/25ee/08db14dca641d085584909b551042618b8bf.pdf,Learning to Segment Instances in Videos with Spatial Propagation Network,2017 +216,VOC,voc,48.9095338,9.1831892,University of Stuttgart,edu,d0f81c31e11af1783644704321903a3d2bd83fd6,citation,https://pdfs.semanticscholar.org/d0f8/1c31e11af1783644704321903a3d2bd83fd6.pdf,3D Façade Labeling over Complex Scenarios: A Case Study Using Convolutional Neural Network and Structure-From-Motion,2018 +217,VOC,voc,50.7369302,-3.53647672,University of Exeter,edu,d0f81c31e11af1783644704321903a3d2bd83fd6,citation,https://pdfs.semanticscholar.org/d0f8/1c31e11af1783644704321903a3d2bd83fd6.pdf,3D Façade Labeling over Complex Scenarios: A Case Study Using Convolutional Neural Network and Structure-From-Motion,2018 +218,VOC,voc,38.99203005,-76.9461029,University of Maryland College Park,edu,a996f22a2d0c685f7e4972df9f45e99efc3cbb76,citation,https://arxiv.org/pdf/1708.00079.pdf,Towards the Success Rate of One: Real-Time Unconstrained Salient Object Detection,2018 +219,VOC,voc,47.05821,15.46019568,Graz University of Technology,edu,4da5f0c1d07725a06c6b4a2646e31ea3a5f14435,citation,http://pdfs.semanticscholar.org/4da5/f0c1d07725a06c6b4a2646e31ea3a5f14435.pdf,End-to-End Training of Hybrid CNN-CRF Models for Semantic Segmentation using Structured Learning,2017 +220,VOC,voc,52.3553655,4.9501644,University of Amsterdam,edu,26c58e24687ccbe9737e41837aab74e4a499d259,citation,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Li_Codemaps_-_Segment_2013_ICCV_paper.pdf,"Codemaps - Segment, Classify and Search Objects Locally",2013 +221,VOC,voc,37.4219999,-122.0840575,Google,company,299b65d5d3914dad9aae2f936165dcebcf78db88,citation,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.203,Weakly-and Semi-Supervised Learning of a Deep Convolutional Network for Semantic Image Segmentation,2015 +222,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,cb5dcd048b0eaa78a887a014be26a8a7b1325d36,citation,https://arxiv.org/pdf/1709.04093.pdf,Joint Learning of Set Cardinality and State Distribution,2018 +223,VOC,voc,34.2469152,108.91061982,Northwestern Polytechnical 
University,edu,63660c50e2669a5115c2379e622549d8ed79be00,citation,http://porikli.com/mysite/pdfs/porikli%202017%20-%20Deep%20salient%20object%20detection%20by%20integrating%20multi-level%20cues.pdf,Deep Salient Object Detection by Integrating Multi-level Cues,2017 +224,VOC,voc,-35.2776999,149.118527,Australian National University,edu,63660c50e2669a5115c2379e622549d8ed79be00,citation,http://porikli.com/mysite/pdfs/porikli%202017%20-%20Deep%20salient%20object%20detection%20by%20integrating%20multi-level%20cues.pdf,Deep Salient Object Detection by Integrating Multi-level Cues,2017 +225,VOC,voc,48.14955455,11.56775314,Technical University Munich,edu,472541ccd941b9b4c52e1f088cc1152de9b3430f,citation,https://arxiv.org/pdf/1612.00197.pdf,Learning in an Uncertain World: Representing Ambiguity Through Multiple Hypotheses,2017 +226,VOC,voc,47.3764534,8.54770931,ETH Zürich,edu,9184b0c04013bfdfd82f4f271b5f017396c2f085,citation,https://pdfs.semanticscholar.org/9184/b0c04013bfdfd82f4f271b5f017396c2f085.pdf,Semantic Segmentation for Line Drawing Vectorization Using Neural Networks,2018 +227,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,57488aa24092fa7118aa5374c90b282a32473cf9,citation,https://arxiv.org/pdf/1807.01257.pdf,A Weakly Supervised Adaptive DenseNet for Classifying Thoracic Diseases and Identifying Abnormalities,2018 +228,VOC,voc,39.9492344,-75.19198985,University of Pennsylvania,edu,57488aa24092fa7118aa5374c90b282a32473cf9,citation,https://arxiv.org/pdf/1807.01257.pdf,A Weakly Supervised Adaptive DenseNet for Classifying Thoracic Diseases and Identifying Abnormalities,2018 +229,VOC,voc,32.0565957,118.77408833,Nanjing University,edu,7771807cd05f78a4591f2d0b094ddd3e0bd5339a,citation,https://arxiv.org/pdf/1707.06399.pdf,Adaptive Feeding: Achieving Fast and Accurate Detections by Adaptively Combining Object Detectors,2017 +230,VOC,voc,50.7944026,-1.0971748,Cambridge University,edu,4558338873556d01fd290de6ddc55721c633a1ad,citation,http://pdfs.semanticscholar.org/4558/338873556d01fd290de6ddc55721c633a1ad.pdf,Training Constrained Deconvolutional Networks for Road Scene Semantic Segmentation,2016 +231,VOC,voc,42.3583961,-71.09567788,MIT,edu,85957b49896246bb416c0a182e52b355a8fa40b4,citation,https://arxiv.org/pdf/1806.03510.pdf,Feature Pyramid Network for Multi-Class Land Segmentation,2018 +232,VOC,voc,17.4454957,78.34854698,International Institute of Information Technology,edu,f5eb411217f729ad7ae84bfd4aeb3dedb850206a,citation,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf,Tackling Low Resolution for Better Scene Understanding,2018 +233,VOC,voc,53.8338371,10.7035939,Institute of Systems and Robotics,edu,7fb8d9c36c23f274f2dd84945dd32ec2cc143de1,citation,http://pdfs.semanticscholar.org/8e44/ba779d7cdc23d597c2c6e4420129834e7e21.pdf,Semantic Segmentation with Second-Order Pooling,2012 +234,VOC,voc,50.7338124,7.1022465,University of Bonn,edu,7fb8d9c36c23f274f2dd84945dd32ec2cc143de1,citation,http://pdfs.semanticscholar.org/8e44/ba779d7cdc23d597c2c6e4420129834e7e21.pdf,Semantic Segmentation with Second-Order Pooling,2012 +235,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,b5e3beb791cc17cdaf131d5cca6ceb796226d832,citation,http://pdfs.semanticscholar.org/b5e3/beb791cc17cdaf131d5cca6ceb796226d832.pdf,Novel Dataset for Fine-Grained Image Categorization: Stanford Dogs,2012 +236,VOC,voc,39.94976005,116.33629046,Beijing Jiaotong University,edu,b5968e7bb23f5f03213178c22fd2e47af3afa04c,citation,https://arxiv.org/pdf/1705.07206.pdf,Multiple-Human Parsing in the 
Wild,2017 +237,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,b5968e7bb23f5f03213178c22fd2e47af3afa04c,citation,https://arxiv.org/pdf/1705.07206.pdf,Multiple-Human Parsing in the Wild,2017 +238,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,532c089b43983935e1001c5e35aa35440263beaf,citation,https://arxiv.org/pdf/1804.03166.pdf,G-Distillation: Reducing Overconfident Errors on Novel Samples,2018 +239,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,35fc0b28d0d674b28dd625d170bc641a36b17318,citation,http://pdfs.semanticscholar.org/35fc/0b28d0d674b28dd625d170bc641a36b17318.pdf,CSI: Composite Statistical Inference Techniques for Semantic Segmentation,2013 +240,VOC,voc,55.7039571,13.1902011,Lund University,edu,35fc0b28d0d674b28dd625d170bc641a36b17318,citation,http://pdfs.semanticscholar.org/35fc/0b28d0d674b28dd625d170bc641a36b17318.pdf,CSI: Composite Statistical Inference Techniques for Semantic Segmentation,2013 +241,VOC,voc,58.38131405,26.72078081,University of Tartu,edu,e4cb27d2a3e1153cb517d97d61de48ff0483c988,citation,https://pdfs.semanticscholar.org/e4cb/27d2a3e1153cb517d97d61de48ff0483c988.pdf,Viktoria Plemakova Vehicle Detection Based on Convolutional Neural Networks,2018 +242,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,3d0660e18c17db305b9764bb86b21a429241309e,citation,https://arxiv.org/pdf/1604.03505.pdf,Counting Everyday Objects in Everyday Scenes,2017 +243,VOC,voc,37.2381023,127.1903431,Myongji University,edu,a67da2dd79c01e8cc4029ecc5a05b97967403862,citation,https://pdfs.semanticscholar.org/a67d/a2dd79c01e8cc4029ecc5a05b97967403862.pdf,On Selecting Helpful Unlabeled Data for Improving Semi-Supervised Support Vector Machines,2014 +244,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,4ab69672e1116427d685bf7c1edb5b1fd0573b5e,citation,http://bigml.cs.tsinghua.edu.cn/~lingxi/PDFs/Xie_ACMMM12_EdgeGPP.pdf,Spatial pooling of heterogeneous features for image applications,2012 +245,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,989c7cdafa9b90ab2ea0a9d8fa60634cc698f174,citation,http://pdfs.semanticscholar.org/989c/7cdafa9b90ab2ea0a9d8fa60634cc698f174.pdf,YoloFlow Real - time Object Tracking in Video CS 229 Course Project,2016 +246,VOC,voc,3.12267405,101.65356103,University of Malaya,edu,85af6c005df806b57b306a732dcb98e096d15bfb,citation,https://arxiv.org/pdf/1805.11227.pdf,Getting to Know Low-light Images with The Exclusively Dark Dataset,2018 +247,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,cdb293381ff396d6e9c0f5e9578d411e759347fd,citation,https://pdfs.semanticscholar.org/022e/eae0edc09deb228da26d5390874f781ace0f.pdf,3 DR 2 N 2 : A Unified Approach for Single and Multiview 3 D Object Reconstruction,2016 +248,VOC,voc,51.7534538,-1.25400997,University of Oxford,edu,0e67717484684d90ae9d4e1bb9cdceb74b194910,citation,http://pdfs.semanticscholar.org/0e67/717484684d90ae9d4e1bb9cdceb74b194910.pdf,Mining Pixels: Weakly Supervised Semantic Segmentation Using Image Labels,2016 +249,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,5b4b84ce3518c8a14f57f5f95a1d07fb60e58223,citation,https://pdfs.semanticscholar.org/9f92/05a60ddf1135929e0747db34363b3a8c6bc8.pdf,Diagnosing Error in Object Detectors,2012 +250,VOC,voc,42.718568,-84.47791571,Michigan State University,edu,47203943c86e4d9355ffd99cd3d75f37211fd805,citation,http://pdfs.semanticscholar.org/be18/9c7066c4d99d617d137c975139c594ad09af.pdf,Semi-Crowdsourced Clustering: Generalizing Crowd Labeling by 
Robust Distance Metric Learning,2012 +251,VOC,voc,42.8298248,-73.87719385,GE Global Research Center,edu,47203943c86e4d9355ffd99cd3d75f37211fd805,citation,http://pdfs.semanticscholar.org/be18/9c7066c4d99d617d137c975139c594ad09af.pdf,Semi-Crowdsourced Clustering: Generalizing Crowd Labeling by Robust Distance Metric Learning,2012 +252,VOC,voc,39.95472495,-75.15346905,Temple University,edu,45ff38add61df32a027048624f58952a67a7c5f5,citation,http://pdfs.semanticscholar.org/45ff/38add61df32a027048624f58952a67a7c5f5.pdf,Deep Context Convolutional Neural Networks for Semantic Segmentation,2017 +253,VOC,voc,43.08250655,-77.67121663,Rochester Institute of Technology,edu,0a789733ccb300d0dd9df6174faaa7e8c64e0409,citation,http://pdfs.semanticscholar.org/0a78/9733ccb300d0dd9df6174faaa7e8c64e0409.pdf,High-Resolution Multispectral Dataset for Semantic Segmentation,2017 +254,VOC,voc,47.05821,15.46019568,Graz University of Technology,edu,9d3a6e459e0cecda20a8afd69d182877ff0224cf,citation,http://pdfs.semanticscholar.org/9d3a/6e459e0cecda20a8afd69d182877ff0224cf.pdf,A Framework for Articulated Hand Pose Estimation and Evaluation,2015 +255,VOC,voc,52.3553655,4.9501644,University of Amsterdam,edu,943a1e218b917172199e524944006aa349f58968,citation,https://arxiv.org/pdf/1807.11857.pdf,Joint Learning of Intrinsic Images and Semantic Segmentation,2018 +256,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,5f68e2131d9275d56092e9fca05bcfc65abea0d8,citation,http://doi.acm.org/10.1145/2806416.2806469,Cross-Modal Similarity Learning: A Low Rank Bilinear Formulation,2015 +257,VOC,voc,40.9153196,-73.1270626,Stony Brook University,edu,f989a20fbcc2d576c0c4514a0e5085c741580778,citation,https://arxiv.org/pdf/1612.03236.pdf,Co-localization with Category-Consistent Features and Geodesic Distance Propagation,2017 +258,VOC,voc,42.36782045,-71.12666653,Harvard University,edu,f989a20fbcc2d576c0c4514a0e5085c741580778,citation,https://arxiv.org/pdf/1612.03236.pdf,Co-localization with Category-Consistent Features and Geodesic Distance Propagation,2017 +259,VOC,voc,24.7925484,120.9951183,National Tsing Hua University,edu,cf94200a476dc15d6da95db809349db4cfd8e92c,citation,https://arxiv.org/pdf/1807.11436.pdf,Leveraging Motion Priors in Videos for Improving Human Segmentation,2018 +260,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,25dba68e4db0ce361032126b91f734f9252cae7c,citation,https://arxiv.org/pdf/1611.08998.pdf,DeepSetNet: Predicting Sets with Deep Neural Networks,2017 +261,VOC,voc,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,883767948f535ea2bf8a0c03047ca9064e1b078f,citation,https://pdfs.semanticscholar.org/8837/67948f535ea2bf8a0c03047ca9064e1b078f.pdf,A Combination of Object Recognition and Localisation for an Autonomous Racecar,0 +262,VOC,voc,23.09461185,113.28788994,Sun Yat-Sen University,edu,18095a530b532a70f3b615fef2f59e6fdacb2d84,citation,https://arxiv.org/pdf/1604.02271v3.pdf,Deep Structured Scene Parsing by Learning with Image Descriptions,2016 +263,VOC,voc,45.7413921,126.62552755,Harbin Institute of Technology,edu,18095a530b532a70f3b615fef2f59e6fdacb2d84,citation,https://arxiv.org/pdf/1604.02271v3.pdf,Deep Structured Scene Parsing by Learning with Image Descriptions,2016 +264,VOC,voc,-27.47715625,153.02841004,Queensland University of Technology,edu,9397e7acd062245d37350f5c05faf56e9cfae0d6,citation,http://pdfs.semanticscholar.org/9397/e7acd062245d37350f5c05faf56e9cfae0d6.pdf,DeepFruits: A Fruit Detection System Using Deep Neural Networks,2016 
+265,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,03a24d15533dae78de78fd9d5f6c9050fb97f186,citation,https://doi.org/10.1109/SSCI.2016.7850112,Pedestrian detection aided by scale-discriminative network,2016 +266,VOC,voc,-33.88890695,151.18943366,University of Sydney,edu,17d4fd92352baf6f0039ec64d43ca572c8252384,citation,https://arxiv.org/pdf/1806.07049.pdf,MoE-SPNet: A mixture-of-experts scene parsing network,2018 +267,VOC,voc,47.05821,15.46019568,Graz University of Technology,edu,30a29f6c407749e97bc7c2db5674a62773af9d27,citation,http://pdfs.semanticscholar.org/30a2/9f6c407749e97bc7c2db5674a62773af9d27.pdf,Tracking and Visual Quality Inspection in Harsh Environments (print-version),2012 +268,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,280d632ef3234c5ab06018c6eaccead75bc173b3,citation,http://pdfs.semanticscholar.org/6b1a/c8e438041ac02cc8fab5762ca069c386f473.pdf,Efficient Image and Video Co-localization with Frank-Wolfe Algorithm,2014 +269,VOC,voc,31.83907195,117.26420748,University of Science and Technology of China,edu,0f945f796a9343b51a3dc69941c0fa1a98c0f448,citation,http://pdfs.semanticscholar.org/a7ef/979ce52b9e4bcbd6ee5524dfd4e92baf6292.pdf,Local Hypersphere Coding Based on Edges between Visual Words,2012 +270,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,0db6a58927a671c01089c53248b0e1c36bdc3231,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Pham_Efficient_Point_Process_CVPR_2016_paper.pdf,Efficient Point Process Inference for Large-Scale Object Detection,2016 +271,VOC,voc,42.2942142,-83.71003894,University of Michigan,edu,14d0afea52c4e9b7a488f6398e4a92bd4f4b93c7,citation,https://arxiv.org/pdf/1804.07667.pdf,Rethinking the Faster R-CNN Architecture for Temporal Action Localization,2018 +272,VOC,voc,42.2942142,-83.71003894,University of Michigan,edu,8da1b0834688edb311a803532e33939e9ecf8292,citation,https://arxiv.org/pdf/1808.01244.pdf,CornerNet: Detecting Objects as Paired Keypoints,2018 +273,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,f42d3225afd9e463ddb7a355f64b54af8bd14227,citation,https://arxiv.org/pdf/1804.10343.pdf,Stacked U-Nets: A No-Frills Approach to Natural Image Segmentation,2018 +274,VOC,voc,31.83907195,117.26420748,University of Science and Technology of China,edu,a1dd88f44d045b360569a9a8721f728afbd951c3,citation,https://pdfs.semanticscholar.org/a1dd/88f44d045b360569a9a8721f728afbd951c3.pdf,Relief Impression Image Detection : Unsupervised Extracting Objects Directly From Feature Arrangements of Deep CNN,2016 +275,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,fc027fccb19512a439fc17181c34ee1c3aad51b5,citation,https://arxiv.org/pdf/1708.03383.pdf,Joint Multi-person Pose Estimation and Semantic Part Segmentation,2017 +276,VOC,voc,39.329053,-76.619425,Johns Hopkins University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +277,VOC,voc,40.47913175,-74.43168868,Rutgers University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +278,VOC,voc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,451eed7fd8ae281d1cc76ca8cdecbaf47816e55a,citation,http://pdfs.semanticscholar.org/451e/ed7fd8ae281d1cc76ca8cdecbaf47816e55a.pdf,Close Yet Distinctive Domain Adaptation,2017 
+279,VOC,voc,35.9990522,-78.9290629,Duke University,edu,992b93ab9d016640551a8cebcaf4757288154f32,citation,http://pdfs.semanticscholar.org/e38c/f96363aaf1f17c487c484ad27d3175ca4b31.pdf,Nested Pictorial Structures,2012 +280,VOC,voc,43.08250655,-77.67121663,Rochester Institute of Technology,edu,7489990ea3d6ab4c1c86c9ed9f049399961dfaef,citation,https://people.rit.edu/ndcsma/pubs/WNYISPW_Nov_2014_Chew.pdf,Normalized cutswith soft must-link constraints for image segmentation and clustering,2014 +281,VOC,voc,59.34986645,18.07063213,"KTH Royal Institute of Technology, Stockholm",edu,41199678ad9370ff8ca7e9e3c2617b62a297fac3,citation,http://pdfs.semanticscholar.org/4119/9678ad9370ff8ca7e9e3c2617b62a297fac3.pdf,Multitask Deep Learning models for real-time deployment in embedded systems,2017 +282,VOC,voc,39.7487516,30.47653071,Eskisehir Osmangazi University,edu,7fb74f5abab4830e3cdaf477230e5571d9e3ca57,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Cevikalp_Polyhedral_Conic_Classifiers_CVPR_2017_paper.pdf,Polyhedral Conic Classifiers for Visual Object Detection and Classification,2017 +283,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,10793d1475607929fedc6d9a677911ad16843e58,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Li_Unsupervised_Learning_of_CVPR_2016_paper.pdf,Unsupervised Learning of Edges,2016 +284,VOC,voc,31.30104395,121.50045497,Fudan University,edu,c94fd258a8f1e8f4033a7fe491f1372dcf7d3cd6,citation,https://arxiv.org/pdf/1807.04897.pdf,TS ^2 2 C: Tight Box Mining with Surrounding Segmentation Context for Weakly Supervised Object Detection,2018 +285,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,c94fd258a8f1e8f4033a7fe491f1372dcf7d3cd6,citation,https://arxiv.org/pdf/1807.04897.pdf,TS ^2 2 C: Tight Box Mining with Surrounding Segmentation Context for Weakly Supervised Object Detection,2018 +286,VOC,voc,52.5180641,13.3250425,TU Berlin,edu,2581a12189eb1a0b5b27a7fd1c2cbe44c88fcc20,citation,http://arxiv.org/pdf/1512.00172v1.pdf,Analyzing Classifiers: Fisher Vectors and Deep Neural Networks,2016 +287,VOC,voc,32.0565957,118.77408833,Nanjing University,edu,96416b1b44fb05302c6e9a8ab1b74d9204995e73,citation,http://pdfs.semanticscholar.org/9641/6b1b44fb05302c6e9a8ab1b74d9204995e73.pdf,Learning Effective Binary Visual Representations with Deep Networks,2018 +288,VOC,voc,42.3619407,-71.0904378,MIT CSAIL,edu,aa2ddae22760249729ac2c2c4e24c8b665bcd40e,citation,https://pdfs.semanticscholar.org/8c47/635ae7f1641c2bdd45026ad7dbff70c24398.pdf,Interpretable Basis Decomposition for Visual Explanation,2018 +289,VOC,voc,42.2942142,-83.71003894,University of Michigan,edu,60542b1a857024c79db8b5b03db6e79f74ec8f9f,citation,https://arxiv.org/pdf/1702.05448.pdf,Learning to Detect Human-Object Interactions,2018 +290,VOC,voc,36.3693473,120.673818,Shandong University,edu,bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d,citation,http://pdfs.semanticscholar.org/bd8a/85acaa45d4068fca584e8d9e3bd3bb4eea4d.pdf,Toward Scene Recognition by Discovering Semantic Structures and Parts,2015 +291,VOC,voc,49.2767454,-122.91777375,Simon Fraser University,edu,bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d,citation,http://pdfs.semanticscholar.org/bd8a/85acaa45d4068fca584e8d9e3bd3bb4eea4d.pdf,Toward Scene Recognition by Discovering Semantic Structures and Parts,2015 +292,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,456abee9c8d31f004b2f0a3b47222043e20f5042,citation,https://arxiv.org/pdf/1603.09188.pdf,Unsupervised Visual Sense Disambiguation for Verbs using Multimodal 
Embeddings,2016 +293,VOC,voc,31.83907195,117.26420748,University of Science and Technology of China,edu,7c2f6424b0bb2c28f282fbc0b4e98bf85d5584eb,citation,http://pdfs.semanticscholar.org/a5ae/7d662ed086bc5b0c9a2c1dc54fcb23635000.pdf,Relief R-CNN: Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,2016 +294,VOC,voc,22.53521465,113.9315911,Shenzhen University,edu,7c2f6424b0bb2c28f282fbc0b4e98bf85d5584eb,citation,http://pdfs.semanticscholar.org/a5ae/7d662ed086bc5b0c9a2c1dc54fcb23635000.pdf,Relief R-CNN: Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,2016 +295,VOC,voc,37.5557271,127.0436642,Hanyang University,edu,59e9934720baf3c5df3a0e1e988202856e1f83ce,citation,https://arxiv.org/pdf/1511.04136.pdf,UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking,2015 +296,VOC,voc,40.0141905,-83.0309143,University of Electronic Science and Technology of China,edu,d58c44bd9b464d9ac1db1344445c31364925f75a,citation,https://pdfs.semanticscholar.org/d58c/44bd9b464d9ac1db1344445c31364925f75a.pdf,TBN: Convolutional Neural Network with Ternary Inputs and Binary Weights,2018 +297,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,81ba5202424906f64b77f68afca063658139fbb2,citation,https://arxiv.org/pdf/1611.09078.pdf,Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition,2017 +298,VOC,voc,46.109237,7.08453549,IDIAP Research Institute,edu,81ba5202424906f64b77f68afca063658139fbb2,citation,https://arxiv.org/pdf/1611.09078.pdf,Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition,2017 +299,VOC,voc,50.7338124,7.1022465,University of Bonn,edu,0b6f64c78c44dc043e2972fa7bfe2a5753768609,citation,https://doi.org/10.1109/ICPR.2016.7900008,A future for learning semantic models of man-made environments,2016 +300,VOC,voc,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,016eb7b32d1fdec0899151fb03799378bf59bbe5,citation,http://pdfs.semanticscholar.org/016e/b7b32d1fdec0899151fb03799378bf59bbe5.pdf,Point Linking Network for Object Detection,2017 +301,VOC,voc,33.9928298,-81.02685168,University of South Carolina,edu,cd9d654c6a4250e0cf8bcfddc2afab9e70ee6cae,citation,http://pdfs.semanticscholar.org/cd9d/654c6a4250e0cf8bcfddc2afab9e70ee6cae.pdf,Object Detection with Mask-based Feature Encoding,2018 +302,VOC,voc,36.20304395,117.05842113,Tianjin University,edu,cd9d654c6a4250e0cf8bcfddc2afab9e70ee6cae,citation,http://pdfs.semanticscholar.org/cd9d/654c6a4250e0cf8bcfddc2afab9e70ee6cae.pdf,Object Detection with Mask-based Feature Encoding,2018 +303,VOC,voc,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,28737575297a20d431dd2b777a79a8be2c9c2bbd,citation,http://pdfs.semanticscholar.org/2873/7575297a20d431dd2b777a79a8be2c9c2bbd.pdf,Object Ranking on Deformable Part Models with Bagged LambdaMART,2014 +304,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,46702e0127e16a4d6a1feda3ffc5f0f123957e87,citation,https://arxiv.org/pdf/1809.06131.pdf,Revisit Multinomial Logistic Regression in Deep Learning: Data Dependent Model Initialization for Image Recognition,2018 +305,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,d2b2cb1d5cc1aa30cf5be7bcb0494198934caabb,citation,http://pdfs.semanticscholar.org/d2b2/cb1d5cc1aa30cf5be7bcb0494198934caabb.pdf,A Restricted Visual Turing Test for Deep Scene and Event Understanding,2015 
+306,VOC,voc,37.8687126,-122.25586815,"University of California, Berkeley",edu,446fbff6a2a7c9989b0a0465f960e236d9a5e886,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pathak_Context_Encoders_Feature_CVPR_2016_paper.pdf,Context Encoders: Feature Learning by Inpainting,2016 +307,VOC,voc,51.49887085,-0.17560797,Imperial College London,edu,291e5377df2eec4835b5c6889896941831a11c69,citation,http://pdfs.semanticscholar.org/291e/5377df2eec4835b5c6889896941831a11c69.pdf,Recovering 6D Object Pose: Multi-modal Analyses on Challenges,2017 +308,VOC,voc,40.9153196,-73.1270626,Stony Brook University,edu,b69fbf046faf685655b5fa52fef07fb77e75eff4,citation,http://pdfs.semanticscholar.org/b69f/bf046faf685655b5fa52fef07fb77e75eff4.pdf,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,2013 +309,VOC,voc,39.7487516,30.47653071,Eskisehir Osmangazi University,edu,13bda03fc8984d5943ed8d02e49a779d27c84114,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248047,Efficient object detection using cascades of nearest convex model classifiers,2012 +310,VOC,voc,50.7338124,7.1022465,University of Bonn,edu,87a66ccc68374ffb704ee6fb9fa7df369718095c,citation,http://pdfs.semanticscholar.org/ea90/16fb585ba6449d3d6f98bf85fa0bcd1f4621.pdf,Multi-person Pose Estimation with Local Joint-to-Person Associations,2016 +311,VOC,voc,39.9922379,116.30393816,Peking University,edu,4960ab1cef23e5ccd60173725ea280f462164a0e,citation,https://pdfs.semanticscholar.org/4960/ab1cef23e5ccd60173725ea280f462164a0e.pdf,Video Object Segmentation by Learning Location-Sensitive Embeddings,2018 +312,VOC,voc,39.977217,116.337632,Microsoft Research Asia,company,4960ab1cef23e5ccd60173725ea280f462164a0e,citation,https://pdfs.semanticscholar.org/4960/ab1cef23e5ccd60173725ea280f462164a0e.pdf,Video Object Segmentation by Learning Location-Sensitive Embeddings,2018 +313,VOC,voc,35.9990522,-78.9290629,Duke University,edu,8856fbf333b2aba7b9f1f746e16a2b7f083ee5b8,citation,http://pdfs.semanticscholar.org/8856/fbf333b2aba7b9f1f746e16a2b7f083ee5b8.pdf,Analyzing animal behavior via classifying each video frame using convolutional neural networks,2015 +314,VOC,voc,34.1235825,108.83546,Xidian University,edu,f9f01af981f8d25f0c96ea06d88be62dabb79256,citation,https://pdfs.semanticscholar.org/f9f0/1af981f8d25f0c96ea06d88be62dabb79256.pdf,Terahertz Image Detection with the Improved Faster Region-Based Convolutional Neural Network,2018 +315,VOC,voc,37.5600406,126.9369248,Yonsei University,edu,09066d7d0bb6273bf996c8538d7b34c38ea6a500,citation,https://arxiv.org/pdf/1809.01845.pdf,"Yes, IoU loss is submodular - as a function of the mispredictions",2018 +316,VOC,voc,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,4aeebd1c9b4b936ed2e4d988d8d28e27f129e6f1,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chiu_See_the_Difference_ICCV_2015_paper.pdf,See the Difference: Direct Pre-Image Reconstruction and Pose Estimation by Differentiating HOG,2015 +317,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,232ff2dab49cb5a1dae1012fd7ba53382909ec18,citation,http://pdfs.semanticscholar.org/232f/f2dab49cb5a1dae1012fd7ba53382909ec18.pdf,Semantic Video Segmentation from Occlusion Relations within a Convex Optimization Framework,2013 +318,VOC,voc,50.13053055,8.69234224,University of Frankfurt,edu,465c34c3334f29de28f973b7702a235509649429,citation,http://pdfs.semanticscholar.org/465c/34c3334f29de28f973b7702a235509649429.pdf,Stereopsis via deep 
learning,2013 +319,VOC,voc,47.6543238,-122.30800894,University of Washington,edu,caa2ded6d8d5de97c824d29b0c7a18d220c596c8,citation,https://arxiv.org/pdf/1709.02554.pdf,Learning to Segment Breast Biopsy Whole Slide Images,2018 +320,VOC,voc,44.48116865,-73.2002179,University of Vermont,edu,caa2ded6d8d5de97c824d29b0c7a18d220c596c8,citation,https://arxiv.org/pdf/1709.02554.pdf,Learning to Segment Breast Biopsy Whole Slide Images,2018 +321,VOC,voc,42.2942142,-83.71003894,University of Michigan,edu,289d833a35c2156b7e332e67d1cb099fd0683025,citation,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chao_HICO_A_Benchmark_ICCV_2015_paper.pdf,HICO: A Benchmark for Recognizing Human-Object Interactions in Images,2015 +322,VOC,voc,37.8687126,-122.25586815,"University of California, Berkeley",edu,0fbdd4b8eb9e4c4cfbe5b76ab29ab8b0219fbdc0,citation,https://people.eecs.berkeley.edu/~pathak/papers/iccv15.pdf,Constrained Convolutional Neural Networks for Weakly Supervised Segmentation,2015 +323,VOC,voc,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,f94f79168c1cfaebb8eab5151e01d56478ab0b73,citation,http://pdfs.semanticscholar.org/f94f/79168c1cfaebb8eab5151e01d56478ab0b73.pdf,Optimizing Region Selection for Weakly Supervised Object Detection,2017 +324,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,6bb51f431f348b2b3e1db859827e80f97a576c30,citation,http://pdfs.semanticscholar.org/6bb5/1f431f348b2b3e1db859827e80f97a576c30.pdf,Irregular Convolutional Neural Networks,2017 +325,VOC,voc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,b78e611c32dc0daf762cfa93044558cdb545d857,citation,http://pdfs.semanticscholar.org/b78e/611c32dc0daf762cfa93044558cdb545d857.pdf,Temporal Action Detection with Structured Segment Networks Supplementary Materials,2017 +326,VOC,voc,48.14955455,11.56775314,Technical University Munich,edu,bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,citation,http://pdfs.semanticscholar.org/bc12/715a1ddf1a540dab06bf3ac4f3a32a26b135.pdf,Tracking the Trackers: An Analysis of the State of the Art in Multiple Object Tracking,2017 +327,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,citation,http://pdfs.semanticscholar.org/bc12/715a1ddf1a540dab06bf3ac4f3a32a26b135.pdf,Tracking the Trackers: An Analysis of the State of the Art in Multiple Object Tracking,2017 +328,VOC,voc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,4d1757aacbc49c74a5d4e53259c92ab0e47544da,citation,https://arxiv.org/pdf/1805.04310.pdf,Weakly and Semi Supervised Human Body Part Parsing via Pose-Guided Knowledge Transfer,2018 +329,VOC,voc,36.1112058,140.1055176,University of Tsukuba,edu,d392098688a999c70589c995bd4427c212eff69d,citation,http://pdfs.semanticscholar.org/d392/098688a999c70589c995bd4427c212eff69d.pdf,Object Repositioning Based on the Perspective in a Single Image,2014 +330,VOC,voc,22.42031295,114.20788644,Chinese University of Hong Kong,edu,1c1f21bf136fe2eec412e5f70fd918c27c5ccb0a,citation,http://pdfs.semanticscholar.org/1c1f/21bf136fe2eec412e5f70fd918c27c5ccb0a.pdf,Object Detection and Viewpoint Estimation with Auto-masking Neural Network,2014 +331,VOC,voc,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,1c1f21bf136fe2eec412e5f70fd918c27c5ccb0a,citation,http://pdfs.semanticscholar.org/1c1f/21bf136fe2eec412e5f70fd918c27c5ccb0a.pdf,Object Detection and Viewpoint Estimation with Auto-masking Neural Network,2014 +332,VOC,voc,51.49887085,-0.17560797,Imperial College 
London,edu,72e9acdd64e71fc2084acaf177aafaa2e075bd8c,citation,http://pdfs.semanticscholar.org/72e9/acdd64e71fc2084acaf177aafaa2e075bd8c.pdf,The 2017 Hands in the Million Challenge on 3D Hand Pose Estimation,2017 +333,VOC,voc,51.49887085,-0.17560797,Imperial College London,edu,0209389b8369aaa2a08830ac3b2036d4901ba1f1,citation,https://arxiv.org/pdf/1612.01202v2.pdf,DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild,2017 +334,VOC,voc,51.5231607,-0.1282037,University College London,edu,0209389b8369aaa2a08830ac3b2036d4901ba1f1,citation,https://arxiv.org/pdf/1612.01202v2.pdf,DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild,2017 +335,VOC,voc,50.7338124,7.1022465,University of Bonn,edu,07b8a9a225b738c4074a50cf80ee5fe516878421,citation,https://arxiv.org/pdf/1807.09169.pdf,Convolutional Simplex Projection Network for Weakly Supervised Semantic Segmentation,2018 +336,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,1bd1645a629f1b612960ab9bba276afd4cf7c666,citation,http://arxiv.org/pdf/1506.04878.pdf,End-to-End People Detection in Crowded Scenes,2016 +337,VOC,voc,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,1bd1645a629f1b612960ab9bba276afd4cf7c666,citation,http://arxiv.org/pdf/1506.04878.pdf,End-to-End People Detection in Crowded Scenes,2016 +338,VOC,voc,43.7776426,11.259765,University of Florence,edu,1bbe0371ca22c2fdb6e0d098049bbf6430324bdb,citation,http://doi.acm.org/10.1145/2906152,"Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval",2016 +339,VOC,voc,37.43131385,-122.16936535,Stanford University,edu,1bbe0371ca22c2fdb6e0d098049bbf6430324bdb,citation,http://doi.acm.org/10.1145/2906152,"Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval",2016 +340,VOC,voc,34.7275714,135.2371,Kobe University,edu,9954f7ee5288724184f9420e39cca9165efa6822,citation,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2015/Th5_4.pdf,Estimation of object functions using deformable part model,2015 +341,VOC,voc,48.14955455,11.56775314,Technical University Munich,edu,e212b2bc41645fe467a73d004067fcf1ca77d87f,citation,http://pdfs.semanticscholar.org/e212/b2bc41645fe467a73d004067fcf1ca77d87f.pdf,Deep Active Contours,2016 +342,VOC,voc,55.94951105,-3.19534913,University of Edinburgh,edu,51c4ecf4539f56c4b1035b890f743b3a91dd758b,citation,http://arxiv.org/abs/1504.06434,Situational object boundary detection,2015 +343,VOC,voc,37.8687126,-122.25586815,"University of California, Berkeley",edu,007e86cb55f0ba0415a7764a1e9f9566c1e8784b,citation,http://pdfs.semanticscholar.org/2677/3023b17ba560bad6a679930710a9049abca5.pdf,Adversarial Feature Learning,2016 +344,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,54d97ea9a5f92761dddd148fb0e602c2293e7c16,citation,https://pdfs.semanticscholar.org/54d9/7ea9a5f92761dddd148fb0e602c2293e7c16.pdf,Associating Inter-image Salient Instances for Weakly Supervised Semantic Segmentation,2018 +345,VOC,voc,51.4879961,-3.17969747,Cardiff University,edu,54d97ea9a5f92761dddd148fb0e602c2293e7c16,citation,https://pdfs.semanticscholar.org/54d9/7ea9a5f92761dddd148fb0e602c2293e7c16.pdf,Associating Inter-image Salient Instances for Weakly Supervised Semantic Segmentation,2018 +346,VOC,voc,51.5231607,-0.1282037,University College London,edu,0e923b74fd41f73f57e22f66397feeea67e834f0,citation,http://pdfs.semanticscholar.org/0e92/3b74fd41f73f57e22f66397feeea67e834f0.pdf,Invariant encoding schemes for visual recognition,2012 
+347,VOC,voc,34.0224149,-118.28634407,University of Southern California,edu,93cba94ff0ff96f865ce24ea01e9c006369d75ff,citation,https://arxiv.org/pdf/1803.03879.pdf,Knowledge Aided Consistency for Weakly Supervised Phrase Grounding,2018
+348,VOC,voc,35.704514,51.40972058,Amirkabir University of Technology,edu,24fc311970e097efc317c0f98d2df37b828bfbad,citation,https://arxiv.org/pdf/1709.08019v2.pdf,Semi-supervised hierarchical semantic object parsing,2017
+349,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,5c4d4fd37e8c80ae95c00973531f34a6d810ea3a,citation,https://arxiv.org/pdf/1603.09439.pdf,The Open World of Micro-Videos,2016
+350,VOC,voc,37.26728,126.9841151,Seoul National University,edu,71b973c87965e4086e75fd2379dd1bd8e3f8231e,citation,https://arxiv.org/pdf/1606.02393.pdf,Progressive Attention Networks for Visual Attribute Prediction,2018
+351,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon University,edu,20c02e98602f6adf1cebaba075d45cef50de089f,citation,https://arxiv.org/pdf/1808.07507.pdf,Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video Action Recognition,2018
+352,VOC,voc,33.776033,-84.39884086,Georgia Institute of Technology,edu,20c02e98602f6adf1cebaba075d45cef50de089f,citation,https://arxiv.org/pdf/1808.07507.pdf,Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video Action Recognition,2018
+353,VOC,voc,47.6543238,-122.30800894,University of Washington,edu,c17ed26650a67e80151f5312fa15b5c423acc797,citation,http://pdfs.semanticscholar.org/c17e/d26650a67e80151f5312fa15b5c423acc797.pdf,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,2017
+354,VOC,voc,36.05238585,140.11852361,Institute of Industrial Science,edu,c17ed26650a67e80151f5312fa15b5c423acc797,citation,http://pdfs.semanticscholar.org/c17e/d26650a67e80151f5312fa15b5c423acc797.pdf,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,2017
+355,VOC,voc,35.9020448,139.93622009,University of Tokyo,edu,c17ed26650a67e80151f5312fa15b5c423acc797,citation,http://pdfs.semanticscholar.org/c17e/d26650a67e80151f5312fa15b5c423acc797.pdf,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,2017
+356,VOC,voc,47.6423318,-122.1369302,Microsoft,company,c17ed26650a67e80151f5312fa15b5c423acc797,citation,http://pdfs.semanticscholar.org/c17e/d26650a67e80151f5312fa15b5c423acc797.pdf,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,2017
+357,VOC,voc,31.21051105,29.91314562,Alexandria University,edu,0ce08f1cc6684495d12c2da157a056c7b88ffcd9,citation,http://pdfs.semanticscholar.org/0ce0/8f1cc6684495d12c2da157a056c7b88ffcd9.pdf,Multi-Modality Feature Transform: An Interactive Image Segmentation Approach,2015
+358,VOC,voc,1.3484104,103.68297965,Nanyang Technological University,edu,567078a51ea63b70396dca5dabb50a10a736d991,citation,https://pdfs.semanticscholar.org/1b5a/3bdb174df1ff36c1c101739d6daaec07760d.pdf,Conditional Generative Adversarial Network for Structured Domain Adaptation,2018
+359,VOC,voc,43.0008093,-78.7889697,University at Buffalo,edu,567078a51ea63b70396dca5dabb50a10a736d991,citation,https://pdfs.semanticscholar.org/1b5a/3bdb174df1ff36c1c101739d6daaec07760d.pdf,Conditional Generative Adversarial Network for Structured Domain Adaptation,2018
+360,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,6e4e5ef25f657de8fb383c8dfeb8e229eea28bb9,citation,https://arxiv.org/pdf/1707.01691.pdf,RON: Reverse Connection with Objectness Prior Networks for Object Detection,2017
+361,VOC,voc,50.0764296,14.41802312,Czech Technical University,edu,cf528f9fe6588b71efa94c219979ce111fc9c1c9,citation,http://pdfs.semanticscholar.org/cf52/8f9fe6588b71efa94c219979ce111fc9c1c9.pdf,On Evaluation of 6D Object Pose Estimation,2016
+362,VOC,voc,22.2081469,114.25964115,University of Hong Kong,edu,3b67645cd512898806aaf1df1811035f2d957f6b,citation,https://arxiv.org/pdf/1705.04043.pdf,SCNet: Learning Semantic Correspondence,2017
+363,VOC,voc,26.513188,80.23651945,Indian Institute of Technology Kanpur,edu,ef2e36daf429899bb48d80ce6804731c3f99bb85,citation,http://pdfs.semanticscholar.org/f7bd/b4df0fb5b3ff9fa0ebfe7c2a9ddc34c09a5c.pdf,"Debnath, Banerjee, Namboodiri: Adapting Ransac-svm to Detect Outliers for Robust Classification",2015
+364,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,79a3a07661b8c6a36070fd767344e15c847a30ef,citation,http://pdfs.semanticscholar.org/79a3/a07661b8c6a36070fd767344e15c847a30ef.pdf,Contextual Pooling in Image Classification,2012
+365,VOC,voc,13.0222347,77.56718325,Indian Institute of Science Bangalore,edu,5aa7f33cdc00787284b609aa63f5eb5c0a3212f6,citation,http://pdfs.semanticscholar.org/5aa7/f33cdc00787284b609aa63f5eb5c0a3212f6.pdf,Multiplicative mixing of object identity and image attributes in single inferior temporal neurons,2018
+366,VOC,voc,51.5247272,-0.03931035,Queen Mary University of London,edu,38f88655debf4bf32978a7b39fbd56aea6ee5752,citation,https://arxiv.org/pdf/1712.03162.pdf,Class Rectification Hard Mining for Imbalanced Deep Learning,2017
+367,VOC,voc,36.1244756,-97.05004383,Oklahoma State University,edu,7b3b2912c1d7a70839bc71a150e33f8634d0fff3,citation,https://pdfs.semanticscholar.org/7b3b/2912c1d7a70839bc71a150e33f8634d0fff3.pdf,Convolutional Neural Network-Based Embarrassing Situation Detection under Camera for Social Robot in Smart Homes,2018
+368,VOC,voc,40.00229045,116.32098908,Tsinghua University,edu,acdc333f7b32d987e65ce15f21db64e850ca9471,citation,https://pdfs.semanticscholar.org/acdc/333f7b32d987e65ce15f21db64e850ca9471.pdf,Direct Loss Minimization for Training Deep Neural Nets,2015
+369,VOC,voc,43.66333345,-79.39769975,University of Toronto,edu,acdc333f7b32d987e65ce15f21db64e850ca9471,citation,https://pdfs.semanticscholar.org/acdc/333f7b32d987e65ce15f21db64e850ca9471.pdf,Direct Loss Minimization for Training Deep Neural Nets,2015
+370,VOC,voc,28.2290209,112.99483204,"National University of Defense Technology, China",edu,da4137396f26bf3e76d04eeed0c94e11b7824aa6,citation,https://arxiv.org/pdf/1711.06828.pdf,Transferable Semi-Supervised Semantic Segmentation,2018
+371,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,da4137396f26bf3e76d04eeed0c94e11b7824aa6,citation,https://arxiv.org/pdf/1711.06828.pdf,Transferable Semi-Supervised Semantic Segmentation,2018
+372,VOC,voc,40.11571585,-88.22750772,Beckman Institute,edu,da4137396f26bf3e76d04eeed0c94e11b7824aa6,citation,https://arxiv.org/pdf/1711.06828.pdf,Transferable Semi-Supervised Semantic Segmentation,2018
+373,VOC,voc,40.9153196,-73.1270626,Stony Brook University,edu,5240941af3b263609acaa168f96e1decdb0b3fe4,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W06/papers/Ge_Action_Classification_in_2015_CVPR_paper.pdf,Action classification in still images using human eye movements,2015
+374,VOC,voc,43.66333345,-79.39769975,University of Toronto,edu,126250d6077a6a68ae06277352eb42c4fa4c8b10,citation,http://pdfs.semanticscholar.org/1262/50d6077a6a68ae06277352eb42c4fa4c8b10.pdf,Learning Patch-based Structural Element Models with Hierarchical Palettes Abstract Learning Patch-based Structural Element Models with Hierarchical Palettes,2012
+375,VOC,voc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,0cbbbfac2fe925479c6b34712e056f840a10fa4d,citation,https://pdfs.semanticscholar.org/0cbb/bfac2fe925479c6b34712e056f840a10fa4d.pdf,Quality Evaluation Methods for Crowdsourced Image Segmentation,2018
+376,VOC,voc,37.3936717,-122.0807262,Facebook,company,0cbbbfac2fe925479c6b34712e056f840a10fa4d,citation,https://pdfs.semanticscholar.org/0cbb/bfac2fe925479c6b34712e056f840a10fa4d.pdf,Quality Evaluation Methods for Crowdsourced Image Segmentation,2018
+377,VOC,voc,42.718568,-84.47791571,Michigan State University,edu,28df3f11894ce0c48dd8aee65a6ec76d9009cbbd,citation,https://arxiv.org/pdf/1809.08318.pdf,Recurrent Flow-Guided Semantic Forecasting,2018
+378,VOC,voc,42.30791465,-83.07176915,University of Windsor,edu,535ed3850e79ccd51922601546ef0fc48c5fb468,citation,http://arxiv.org/abs/1705.04301,A feature embedding strategy for high-level CNN representations from multiple convnets,2017
+379,VOC,voc,30.19331415,120.11930822,Zhejiang University,edu,535ed3850e79ccd51922601546ef0fc48c5fb468,citation,http://arxiv.org/abs/1705.04301,A feature embedding strategy for high-level CNN representations from multiple convnets,2017
+380,VOC,voc,-34.9189226,138.60423668,University of Adelaide,edu,247ca98c5a46616044cf6ae32b0d5b4140a7a161,citation,http://pdfs.semanticscholar.org/247c/a98c5a46616044cf6ae32b0d5b4140a7a161.pdf,High-performance Semantic Segmentation Using Very Deep Fully Convolutional Networks,2016
+381,VOC,voc,28.2290209,112.99483204,"National University of Defense Technology, China",edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018
+382,VOC,voc,1.2962018,103.77689944,National University of Singapore,edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018
+383,VOC,voc,51.6091578,-3.97934429,Swansea University,edu,d115c4a66d765fef596b0b171febca334cea15b5,citation,http://pdfs.semanticscholar.org/d115/c4a66d765fef596b0b171febca334cea15b5.pdf,Combining Stacked Denoising Autoencoders and Random Forests for Face Detection,2016
+384,VOC,voc,39.2899685,-76.62196103,University of Maryland,edu,e20ab84ac7fa0a5d36d4cf2266b7065c60e1c804,citation,https://pdfs.semanticscholar.org/e20a/b84ac7fa0a5d36d4cf2266b7065c60e1c804.pdf,Stacked U-Nets for Ground Material Segmentation in Remote Sensing Imagery,0
+385,VOC,voc,22.3386304,114.2620337,Hong Kong University of Science and Technology,edu,a1fdf45e6649b0020eb533c70d6062b9183561ff,citation,https://arxiv.org/pdf/1802.07931.pdf,Where's YOUR focus: Personalized Attention,2017
+386,VOC,voc,36.05238585,140.11852361,National Institute of Advanced Industrial Science and Technology,edu,775c51b965e8ff37646a265aab64136b4a620526,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_059_ext.pdf,Three viewpoints toward exemplar SVM,2015
+387,VOC,voc,28.59899755,-81.19712501,University of Central Florida,edu,0688c0568f3ab418719260d443cc0d86c3af2914,citation,https://arxiv.org/pdf/1707.09465.pdf,Curriculum Domain Adaptation for Semantic Segmentation of Urban Scenes,2017
+388,VOC,voc,37.4102193,-122.05965487,Carnegie Mellon
University,edu,5d92531e74c4c2cdce91fdcd3c7ff090c8c29504,citation,http://pdfs.semanticscholar.org/5d92/531e74c4c2cdce91fdcd3c7ff090c8c29504.pdf,Synthesizing Scenes for Instance Detection,2017 +389,VOC,voc,58.38131405,26.72078081,University of Tartu,edu,c919a9f61656cdcd3a26076057ee006c48e8f609,citation,https://pdfs.semanticscholar.org/c919/a9f61656cdcd3a26076057ee006c48e8f609.pdf,High-Value Target Detection,2018 +390,VOC,voc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,c6ce8eb37dafed09e1c55735fd1f1e9dc9c6bfe2,citation,https://arxiv.org/pdf/1707.07584.pdf,Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network,2017 +391,VOC,voc,40.0044795,116.370238,Chinese Academy of Sciences,edu,c6ce8eb37dafed09e1c55735fd1f1e9dc9c6bfe2,citation,https://arxiv.org/pdf/1707.07584.pdf,Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network,2017 +392,VOC,voc,55.7039571,13.1902011,Lund University,edu,c0006a2268d299644e9f1b455601bcbe89ddc2b5,citation,https://arxiv.org/pdf/1612.08871.pdf,Semantic Video Segmentation by Gated Recurrent Flow Propagation,2016 +393,VOC,voc,34.13710185,-118.12527487,California Institute of Technology,edu,273b9b7c63ac9196fb12734b49b74d0523ca4df4,citation,https://arxiv.org/pdf/1406.2807v2.pdf,The Secrets of Salient Object Segmentation,2014 +394,VOC,voc,34.0687788,-118.4450094,"University of California, Los Angeles",edu,273b9b7c63ac9196fb12734b49b74d0523ca4df4,citation,https://arxiv.org/pdf/1406.2807v2.pdf,The Secrets of Salient Object Segmentation,2014 +395,VOC,voc,33.59914655,130.22359848,Kyushu University,edu,e771661fa441f008c111ea786eb275153919da6e,citation,http://pdfs.semanticscholar.org/e771/661fa441f008c111ea786eb275153919da6e.pdf,Globally Optimal Object Tracking with Fully Convolutional Networks,2016 +396,VOC,voc,41.5007811,2.11143663,Universitat Autònoma de Barcelona,edu,5feacd9dd73827fb438a6bf6c8b406f4f11aa2fa,citation,http://pdfs.semanticscholar.org/5fea/cd9dd73827fb438a6bf6c8b406f4f11aa2fa.pdf,Slanted Stixels: Representing San Francisco's Steepest Streets,2017 +397,VOC,voc,47.3764534,8.54770931,ETH Zürich,edu,5feacd9dd73827fb438a6bf6c8b406f4f11aa2fa,citation,http://pdfs.semanticscholar.org/5fea/cd9dd73827fb438a6bf6c8b406f4f11aa2fa.pdf,Slanted Stixels: Representing San Francisco's Steepest Streets,2017 diff --git a/site/datasets/final/voc.json b/site/datasets/final/voc.json index 4f442739..92735af6 100644 --- a/site/datasets/final/voc.json +++ b/site/datasets/final/voc.json @@ -1 +1 @@ -{"id": "abe9f3b91fd26fa1b50cd685c0d20debfb372f73", "paper": {"paper_id": "abe9f3b91fd26fa1b50cd685c0d20debfb372f73", "key": "voc", "title": "The Pascal Visual Object Classes Challenge: A Retrospective", "year": 2014, "pdf": "http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf", "address": "", "name": "VOC"}, "address": null, "additional_papers": [], "citations": [{"id": "ed2f711cf9bcd9d7ab039d746af109ed9573421a", "title": "Pixel-Wise Classification Method for High Resolution Remote Sensing Imagery Using Deep Neural Networks", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ed2f/711cf9bcd9d7ab039d746af109ed9573421a.pdf"}, {"id": "a6ac6463b5c89ac9eb013c978f213b309cc6a5c7", "title": "iSPA-Net: Iterative Semantic Pose Alignment 
Network", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.01134.pdf"}, {"id": "aaf4d938f2e66d158d5e635a9c1d279cdc7639c0", "title": "Toward visual understanding of everyday object", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/aaf4/d938f2e66d158d5e635a9c1d279cdc7639c0.pdf"}, {"id": "74dbcc09a3456ddacf5cece640b84045ebdf6be1", "title": "Characterizing Adversarial Examples Based on Spatial Consistency Information for Semantic Segmentation", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}, {"address": "Simon Fraser University", "lat": "49.27674540", "lng": "-122.91777375", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.05162.pdf"}, {"id": "dedc7b080b8e13d72f8dc33e248e7637d191fdbf", "title": "Beyond Dataset Bias: Multi-task Unaligned Shared Knowledge Transfer", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}, {"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/dedc/7b080b8e13d72f8dc33e248e7637d191fdbf.pdf"}, {"id": "18c57ddc9c0164ee792661f43a5578f7a00d0330", "title": "ChestX-Ray8: Hospital-Scale Chest X-Ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases", "addresses": [{"address": "National Institutes of Health", "lat": "39.00041165", "lng": "-77.10327775", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1705.02315v2.pdf"}, {"id": "a825680aeb853fc34c65b5844c4c4391148f18c3", "title": "SSD-6D: Making RGB-Based 3D Detection and 6D Pose Estimation Great Again", "addresses": [{"address": "Toyota Research Institute", "lat": "37.40253645", "lng": "-122.11655107", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10006.pdf"}, {"id": "f249c266321d661ae398c26ddb8c7409f6455ba1", "title": "Revisiting Faster R-CNN: A Deeper Look at Region Proposal Network", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/f249/c266321d661ae398c26ddb8c7409f6455ba1.pdf"}, {"id": "7fa5ede4a34dbe604ce317d529eed78db6642bc0", "title": "Soft Proposal Networks for Weakly Supervised Object Localization", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01829.pdf"}, {"id": "05fdd29536d55fe3ad00689b6f60ada8bc761e91", "title": "HOGgles: Visualizing Object Detection Features", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2013, "pdf": "http://people.csail.mit.edu/torralba/publications/ihog_iccv.pdf"}, {"id": "394bf41cd8578ec10cd34452c688c3e3de1c16a7", "title": "Multi-view to Novel View: Synthesizing Novel Views With Self-learned Confidence", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/394b/f41cd8578ec10cd34452c688c3e3de1c16a7.pdf"}, {"id": 
"2453dd38cde21f3248b55d281405f11d58168fa9", "title": "Multi-scale Patch Aggregation (MPA) for Simultaneous Detection and Segmentation", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.342"}, {"id": "ccb9ffa26b28dffc4f7d613821d1a9f0d60ea3f4", "title": "Online Adaptation of Convolutional Neural Networks for Video Object Segmentation", "addresses": [{"address": "RWTH Aachen University", "lat": "50.77917030", "lng": "6.06728733", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.09364.pdf"}, {"id": "d38af10096aa90dfccd7e4cec9757900bf6958bd", "title": "MultiPoseNet: Fast Multi-Person Pose Estimation Using Pose Residual Network", "addresses": [{"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.04067.pdf"}, {"id": "8c1e828a4826a1fb3eb47ee432f5333b974fa141", "title": "Spatial Graph for Image Classification", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8c1e/828a4826a1fb3eb47ee432f5333b974fa141.pdf"}, {"id": "2a31b4bf2a294b6e67956a6cd5ed6d875af548e0", "title": "Learning Affinity via Spatial Propagation Networks", "addresses": [{"address": "Dalian University of Technology", "lat": "38.88140235", "lng": "121.52281098", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.01020.pdf"}, {"id": "0790c400bfe6fbefe88ef7791476e1abf1952089", "title": "Deep Gaussian Conditional Random Field Network: A Model-Based Deep Network for Discriminative Denoising", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://arxiv.org/pdf/1511.04067v1.pdf"}, {"id": "442cf9b24661c9ea5c2a1dcabd4a5b8af1cd89da", "title": "Beyond One-hot Encoding: lower dimensional target embedding", "addresses": [{"address": "University of Barcelona", "lat": "41.38689130", "lng": "2.16352385", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.10805.pdf"}, {"id": "04eda7eee3e0282de50e54554f50870dd17defa1", "title": "How Hard Can It Be? 
Estimating the Difficulty of Visual Search in an Image", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2016, "pdf": "https://arxiv.org/pdf/1705.08280v1.pdf"}, {"id": "90a4125974564a5ab6c2ce2ff685fc36e9cf0680", "title": "Object Region Mining with Adversarial Erasing: A Simple Classification to Semantic Segmentation Approach", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1703.08448.pdf"}, {"id": "c3dd6c1ddbb9cfcc1bed6383ffaa0b1ce4d13625", "title": "TextSnake: A Flexible Representation for Detecting Text of Arbitrary Shapes", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.01544.pdf"}, {"id": "2976605dc3b73377696537291d45f09f1ab1fbf5", "title": "Cross-Stitch Networks for Multi-task Learning", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://www.ri.cmu.edu/pub_files/2016/6/multi-task.pdf"}, {"id": "25e9a2ec45c34d4610359196dc505a72c3833336", "title": "Benchmarking KAZE and MCM for Multiclass Classification", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/25e9/a2ec45c34d4610359196dc505a72c3833336.pdf"}, {"id": "935e639bebf905af2e35e8b1e7aa0538d7122185", "title": "A Network Structure to Explicitly Reduce Confusion Errors in Semantic Segmentation", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.00313.pdf"}, {"id": "211435a4e14d00f4aaed191acfb548185ee800b9", "title": "Visual Saliency Based Multiple Objects Segmentation and its Parallel Implementation for Real-Time Vision Processing", "addresses": [{"address": "Akita Prefectural University", "lat": "39.80114990", "lng": "140.04591160", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/2114/35a4e14d00f4aaed191acfb548185ee800b9.pdf"}, {"id": "9fae24003bbedecdb617f9779215d79d06b90dd8", "title": "Where Are the Blobs: Counting by Localization with Point Supervision", "addresses": [{"address": "University of British Columbia", "lat": "49.25839375", "lng": "-123.24658161", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09856.pdf"}, {"id": "c45681fa9d9c36a6a196017ef283ac38904f91bb", "title": "Pixel-wise object tracking", "addresses": [{"address": "New York University", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.07377.pdf"}, {"id": "45f858f9e8d7713f60f52618e54089ba68dfcd6d", "title": "What Actions are Needed for Understanding Human Actions in Videos?", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_ICCV_2017/papers/Sigurdsson_What_Actions_Are_ICCV_2017_paper.pdf"}, {"id": "57bd01c042a5f64659b3a9f91c048b8594f762f6", "title": "Advances in fine-grained visual categorization", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", 
"type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/57bd/01c042a5f64659b3a9f91c048b8594f762f6.pdf"}, {"id": "9716416a15e79a36e3481bcdad79cdc905603e6d", "title": "Gaussian Word Embedding with a Wasserstein Distance Loss", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1808.07016.pdf"}, {"id": "97265d64859e06900c11ae5bb5f03f3bd265f858", "title": "Multilabel Image Classification With Regional Latent Semantic Dependencies", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}, {"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1612.01082.pdf"}, {"id": "a19904e76b5ded44e6aeb9af85997d160de6bb22", "title": "TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/a199/04e76b5ded44e6aeb9af85997d160de6bb22.pdf"}, {"id": "96a9ca7a8366ae0efe6b58a515d15b44776faf6e", "title": "Grid Loss: Detecting Occluded Faces", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1609.00129.pdf"}, {"id": "513b8dc73a9fbc467e1ac130fe8c842b5839ca51", "title": "Dissertation Scalable Visual Navigation for Micro Aerial Vehicles using Geometric Prior Knowledge", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/513b/8dc73a9fbc467e1ac130fe8c842b5839ca51.pdf"}, {"id": "0ee3aa2a78f9680bb65a823bd9195c879572ec1c", "title": "What Makes an Object Memorable?", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}, {"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}, {"address": "University of California, Merced", "lat": "37.36566745", "lng": "-120.42158888", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dubey_What_Makes_an_ICCV_2015_paper.pdf"}, {"id": "a776acc53591c3eb0b53501d9758d984e2e52a97", "title": "Weakly Supervised Instance Segmentation using Class Peak Response", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.00880.pdf"}, {"id": "423b941641728a21e37f41359a691815cdd84ceb", "title": "Reversible Recursive Instance-Level Object Segmentation", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/abs/1511.04517"}, {"id": "666939690c564641b864eed0d60a410b31e49f80", "title": "What Visual Attributes Characterize an Object Class?", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6669/39690c564641b864eed0d60a410b31e49f80.pdf"}, {"id": 
"51e8e8c4cac8260ef21c25f9f2a0a68aedbc6d58", "title": "Deep Generative Adversarial Compression Artifact Removal", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.02518.pdf"}, {"id": "3b01a839d174dad6f2635cff7ebe7e1aaad701a4", "title": "Image Co-localization by Mimicking a Good Detector's Confidence Score Distribution", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3b01/a839d174dad6f2635cff7ebe7e1aaad701a4.pdf"}, {"id": "d467035d83fb4e86c4a47b2ca87894388deb8c44", "title": "Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Object Detection", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/d467/035d83fb4e86c4a47b2ca87894388deb8c44.pdf"}, {"id": "264a2b946fae4af23c646cc08fc56947b5be82cf", "title": "Robust object recognition in RGB-D egocentric videos based on Sparse Affine Hull Kernel", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301302"}, {"id": "480888bad59b314236f2d947ebf308ae146c98e4", "title": "Zoom Better to See Clearer: Human and Object Parsing with Hierarchical Auto-Zoom Net", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1511.06881.pdf"}, {"id": "a1ee55d529e04a80f4eae3b30d0961a985a64fa4", "title": "Enabling low bitrate mobile visual recognition: a performance versus bandwidth evaluation", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2013, "pdf": "http://www.cs.utexas.edu/~ycsu/publications/mm029-su.pdf"}, {"id": "0cd736baf31dceea1cc39ac72e00b65587f5fb9e", "title": "Learning Hash Functions Using Column Generation", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/4ad0/b6f189718a7287c6e7b90eb05331e56db334.pdf"}, {"id": "6424574cb92b316928c37232869bfadcb5b4c20f", "title": "C-WSL: Count-Guided Weakly Supervised Localization", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1711.05282.pdf"}, {"id": "51eba481dac6b229a7490f650dff7b17ce05df73", "title": "Situation Recognition: Visual Semantic Role Labeling for Image Understanding", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2016, "pdf": "http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf"}, {"id": "961a5d5750f18e91e28a767b3cb234a77aac8305", "title": "Face Detection without Bells and Whistles", "addresses": [{"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/961a/5d5750f18e91e28a767b3cb234a77aac8305.pdf"}, {"id": "0c05f60998628884a9ac60116453f1a91bcd9dda", "title": "Optimizing Open-Ended Crowdsourcing: The Next Frontier in Crowdsourced Data Management", 
"addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/7b19/80d4ac1730fd0145202a8cb125bf05d96f01.pdf"}, {"id": "efa2aacb0fbee857015fad1dba72767f56be6f39", "title": "Aggregating Crowdsourced Image Segmentations", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}, {"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/efa2/aacb0fbee857015fad1dba72767f56be6f39.pdf"}, {"id": "17113b0f647ce05b2e50d1d40c856370f94da7de", "title": "Zoom Better to See Clearer: Human Part Segmentation with Auto Zoom Net", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1711/3b0f647ce05b2e50d1d40c856370f94da7de.pdf"}, {"id": "549d55a06c5402696e063ce36b411f341a64f8a9", "title": "Learning Deep Structure-Preserving Image-Text Embeddings", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1511.06078v1.pdf"}, {"id": "44bfa5311f0921664e9036f63cadd71049a35f35", "title": "Faster R-CNN-Based Glomerular Detection in Multistained Human Whole Slide Images", "addresses": [{"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/44bf/a5311f0921664e9036f63cadd71049a35f35.pdf"}, {"id": "133f1f2679892d408420d8092283539010723359", "title": "What Makes for Effective Detection Proposals?", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1502.05082v3.pdf"}, {"id": "98d04187f091f402a90a6a9a2108393ca5f91563", "title": "ADVIO: An Authentic Dataset for Visual-Inertial Odometry", "addresses": [{"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}, {"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09828.pdf"}, {"id": "f8015e31d1421f6aee5e17fc3907070b8e0a5e59", "title": "Towards Usable Multimedia Event Detection from Web Videos", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f801/5e31d1421f6aee5e17fc3907070b8e0a5e59.pdf"}, {"id": "6b9e8acef979c13fa9ecc8fe9b635b312fedbcbe", "title": "Multiple Structured-Instance Learning for Semantic Segmentation with Uncertain Training Data", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2014, "pdf": "https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chang_Multiple_Structured-Instance_Learning_2014_CVPR_paper.pdf"}, {"id": "72fd97d21d6465d4bb407b6f8f3accd4419a2fb4", "title": "Automated Identification of Individual Great White Sharks from Unrestricted Fin Imagery", "addresses": [{"address": "University of Bristol", "lat": "51.45848370", "lng": "-2.60977520", "type": 
"edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/384a/ea88ffd79295c99bcb80552f8655dbb87509.pdf"}, {"id": "62b83bf64f200ebb9fa16dfb7108b85e390b2207", "title": "Semantic Labeling in Very High Resolution Images via a Self-Cascaded Convolutional Neural Network", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11236.pdf"}, {"id": "2577211aeaaa1f2245ddc379564813bee3d46c06", "title": "Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Misra_Seeing_Through_the_CVPR_2016_paper.pdf"}, {"id": "3900fb44902396f94fb070be41199b4beecc9081", "title": "Bottom-Up Top-Down Cues for Weakly-Supervised Semantic Segmentation", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1612.02101.pdf"}, {"id": "32c45df9e11e6751bcea1b928f398f6c134d22c6", "title": "Towards Unified Object Detection and Semantic Segmentation", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/32c4/5df9e11e6751bcea1b928f398f6c134d22c6.pdf"}, {"id": "2bcd59835528c583bb5b310522a5ba6e99c58b15", "title": "Multi-class Open Set Recognition Using Probability of Inclusion", "addresses": [{"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/c0ef/596a212d0e40c79c6760673fe122e517b43c.pdf"}, {"id": "3920a205990abc7883c70cc96a0410a2d056c2a8", "title": "Fast Object Segmentation in Unconstrained Video", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2013, "pdf": "http://groups.inf.ed.ac.uk/calvin/Publications/papazoglouICCV2013-camera-ready.pdf"}, {"id": "b6810adcfd507b2e019ebc8afe4f44f953faf946", "title": "ML-LocNet: Improving Object Localization with Multi-view Learning Network", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b681/0adcfd507b2e019ebc8afe4f44f953faf946.pdf"}, {"id": "0e08cf0b19f0600dadce0f6694420d643ea9828b", "title": "The Middle Child Problem: Revisiting Parametric Min-Cut and Seeds for Object Proposals", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Oregon State University", "lat": "45.51982890", "lng": "-122.67797964", "type": "edu"}], "year": 2015, "pdf": "http://openaccess.thecvf.com/content_iccv_2015/papers/Humayun_The_Middle_Child_ICCV_2015_paper.pdf"}, {"id": "81bf7a4b8b3c21d42cb82f946f762c94031e11b8", "title": "Segmentation of Nerve on Ultrasound Images Using Deep Adversarial Network", 
"addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/81bf/7a4b8b3c21d42cb82f946f762c94031e11b8.pdf"}, {"id": "30d8fbb9345cdf1096635af7d39a9b04af9b72f9", "title": "Watching plants grow - a position paper on computer vision and Arabidopsis thaliana", "addresses": [{"address": "Aberystwyth University", "lat": "52.41073580", "lng": "-4.05295501", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/30d8/fbb9345cdf1096635af7d39a9b04af9b72f9.pdf"}, {"id": "87204e4e1a96b8f59cb91828199dacd192292231", "title": "Towards Real-Time Detection and Tracking of Basketball Players using Deep Neural Networks", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8720/4e4e1a96b8f59cb91828199dacd192292231.pdf"}, {"id": "30a4637cbc461838c151073b265fb08e00492ff4", "title": "Weakly Supervised Object Localization with Progressive Domain Adaptation", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2016, "pdf": "http://faculty.ucmerced.edu/mhyang/papers/cvpr16_object_localization.pdf"}, {"id": "606cfdcc43203351dbb944a3bb3719695e557e37", "title": "Ex Paucis Plura : Learning Affordance Segmentation from Very Few Examples", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/606c/fdcc43203351dbb944a3bb3719695e557e37.pdf"}, {"id": "47b6cd69c0746688f6e17b37d73fa12422826dbc", "title": "Self corrective Perturbations for Semantic Segmentation and Classification", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "GE Global Research Center", "lat": "42.82982480", "lng": "-73.87719385", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/47b6/cd69c0746688f6e17b37d73fa12422826dbc.pdf"}, {"id": "14421119527aa5882e1552a651fbd2d73bc94637", "title": "Searching for objects driven by context", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, {"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9b81/86b6bc1e05d7a473d2afebc8a12698d88691.pdf"}, {"id": "3410a1489d04ec6fcfbb3d76d39055117931ccf0", "title": "Learning Collections of Part Models for Object Recognition", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.126"}, {"id": "69b647afe6526256a93033eac14ce470204e7bae", "title": "Training Deep Neural Networks via Direct Loss Minimization", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d7dd/4fb9074db71ebf9155d64b439102d4c7b0c5.pdf"}, {"id": "81825711c2aaa1b9d3ead1a300e71c4353a41382", "title": "End-to-end training of object class detectors for mean average precision", "addresses": 
[{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1607.03476.pdf"}, {"id": "2ce073da76e6ed87eda2da08da0e00f4f060f1a6", "title": "Deep Saliency with Encoded Low Level Distance Map and High Level Features", "addresses": [{"address": "SenseTime", "lat": "39.99300800", "lng": "116.32988200", "type": "company"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.78"}, {"id": "2313c827d3cb9a291b6a00d015c29580862bbdcc", "title": "Weakly- and Semi-supervised Panoptic Segmentation", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.03575.pdf"}, {"id": "839a2155995acc0a053a326e283be12068b35cb8", "title": "Handcrafted Local Features are Convolutional Neural Networks", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/839a/2155995acc0a053a326e283be12068b35cb8.pdf"}, {"id": "634e02d6107529d672cbbdf5b97990966e289829", "title": "Cost-Effective Training of Deep CNNs with Active Model Adaptation", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.05394.pdf"}, {"id": "d0137881f6c791997337b9cc7f1efbd61977270d", "title": "University of Dundee An automated pattern recognition system for classifying indirect immunofluorescence images for HEp-2 cells and specimens Manivannan,", "addresses": [{"address": "University of Dundee", "lat": "56.45796755", "lng": "-2.98214831", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d013/7881f6c791997337b9cc7f1efbd61977270d.pdf"}, {"id": "ed173a39f4cd980eef319116b6ba39cec1b37c42", "title": "Associative Embedding: End-to-End Learning for Joint Detection and Grouping", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.05424.pdf"}, {"id": "84cf838be40e2ab05732fbefbb93ccb2afb0cb48", "title": "Recognizing Handwritten Characters", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/84cf/838be40e2ab05732fbefbb93ccb2afb0cb48.pdf"}, {"id": "b082f440ee91e2751701401919584203b37e1e1a", "title": "SeedNet : Automatic Seed Generation with Deep Reinforcement Learning for Robust Interactive Segmentation", "addresses": [{"address": "Seoul National University", "lat": "37.26728000", "lng": "126.98411510", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/303c/28f1ba643a7cd88255cc379e79052fb7e7b1.pdf"}, {"id": "6008213e4270e88cb414459de759c961469b92dd", "title": "Multi-Evidence Filtering and Fusion for Multi-Label Classification, Object Detection and Semantic Segmentation Based on Weakly Supervised Learning", "addresses": [{"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09129.pdf"}, {"id": "90b4470032f2796a347a0080bcd833c2db0e8bf0", "title": "Improving Image Clustering With Multiple Pretrained CNN Feature Extractors", "addresses": [{"address": "Georgia Institute of 
Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07760.pdf"}, {"id": "beecaf2d6e9d102b6b2459ea38e15179a4b55ffd", "title": "Surveillance Video Parsing with Single Frame Supervision", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.09587.pdf"}, {"id": "0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277", "title": "Detecting Hands in Egocentric Videos: Towards Action Recognition", "addresses": [{"address": "University of Barcelona", "lat": "41.38689130", "lng": "2.16352385", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0fb8/317a8bf5feaf297af8e9b94c50c5ed0e8277.pdf"}, {"id": "0e0179eb4b43016691f0f1473a08089dda21f8f0", "title": "The Art of Detection", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0e01/79eb4b43016691f0f1473a08089dda21f8f0.pdf"}, {"id": "135c957f6a80f250507c7707479e584c288f430f", "title": "Image-Based Synthesis and Re-synthesis of Viewpoints Guided by 3D Models", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.498"}, {"id": "c72b063e23b8b45b57a42ebc2f9714297c539a6f", "title": "TieNet: Text-Image Embedding Network for Common Thorax Disease Classification and Reporting in Chest X-rays", "addresses": [{"address": "National Institutes of Health", "lat": "39.00041165", "lng": "-77.10327775", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1801.04334.pdf"}, {"id": "061ffd3967540424ac4e4066f4a605d8318bab90", "title": "Dirichlet-Based Histogram Feature Transform for Image Classification", "addresses": [{"address": "National Institute of Advanced Industrial Science and Technology", "lat": "36.05238585", "lng": "140.11852361", "type": "edu"}], "year": 2014, "pdf": "https://staff.aist.go.jp/takumi.kobayashi/publication/2014/CVPR2014.pdf"}, {"id": "1a2e9a56e5f71bf95a2f68b6e67e2aaa1c6bf91e", "title": "FPM: Fine Pose Parts-Based Model with 3D CAD Models", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/1a2e/9a56e5f71bf95a2f68b6e67e2aaa1c6bf91e.pdf"}, {"id": "c6f58adf4a5ee8499cbc9b9bc1e6f1c39f1f8eae", "title": "Earn to P Ay a Ttention", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c6f5/8adf4a5ee8499cbc9b9bc1e6f1c39f1f8eae.pdf"}, {"id": "3c8db2ca155ce4e15ec8a2c4c4b979de654fb296", "title": "Holistically-Nested Edge Detection", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2015, "pdf": "http://pages.ucsd.edu/~ztu/publication/iccv15_hed.pdf"}, {"id": "8ccd6aaf1ee4b66c13fffbf560e3920f9bdf5f10", "title": "A multitask deep learning model for real-time deployment in embedded systems", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8ccd/6aaf1ee4b66c13fffbf560e3920f9bdf5f10.pdf"}, {"id": "b4f5cf797a1c857f32e5740d53d9990bc925af2b", "title": "Review of Segmentation 
with Deep Learning and Discover Its Application in Ultrasound Images", "addresses": [{"address": "University of Alberta", "lat": "53.52385720", "lng": "-113.52282665", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b4f5/cf797a1c857f32e5740d53d9990bc925af2b.pdf"}, {"id": "3bad18554678ab46bbbf9de41d36423bc8083c83", "title": "Weakly Supervised Object Boundaries", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1511.07803v1.pdf"}, {"id": "07191c2047b5b643dd72a0583c1d537ba59f977a", "title": "Interactive Segmentation from 1-Bit Feedback", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0719/1c2047b5b643dd72a0583c1d537ba59f977a.pdf"}, {"id": "ae6e8851dfd9c97e37e1cbd61b21cc54d5e2b9c7", "title": "Paraphrasing Complex Network: Network Compression via Factor Transfer", "addresses": [{"address": "Seoul National University", "lat": "37.26728000", "lng": "126.98411510", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.04977.pdf"}, {"id": "5375a3344017d9502ebb4170325435de3da1fa16", "title": "Computer Vision \u2013 ACCV 2012", "addresses": [{"address": "Seoul National University", "lat": "37.26728000", "lng": "126.98411510", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1007/978-3-642-37447-0"}, {"id": "fdfd57d4721174eba288e501c0c120ad076cdca8", "title": "An Analysis of Action Recognition Datasets for Language and Vision Tasks", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.07129.pdf"}, {"id": "ec83c63e28ae2a658bc76a6750e078c3a54b9760", "title": "Deep Descriptor Transforming for Image Co-Localization", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.02758.pdf"}, {"id": "b1177aad0db8bd6b605ffe0d68addaf97b1f9a6b", "title": "Visual Representations and Models: From Latent SVM to Deep Learning", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/5035/733022916db7e5965c565327e169da1e2f39.pdf"}, {"id": "a5ae7d662ed086bc5b0c9a2c1dc54fcb23635000", "title": "Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}, {"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a5ae/7d662ed086bc5b0c9a2c1dc54fcb23635000.pdf"}, {"id": "9528e2e8c20517ab916f803c0371abb4f0ed488b", "title": "Shallow and Deep Convolutional Networks for Saliency Prediction", "addresses": [{"address": "Dublin City University", "lat": "53.38522185", "lng": "-6.25740874", "type": "edu"}], "year": 2016, 
"pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pan_Shallow_and_Deep_CVPR_2016_paper.pdf"}, {"id": "e2272f50ffa33b8e41509e4b795ad5a4eb27bb46", "title": "Region-based semantic segmentation with end-to-end training", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1607.07671.pdf"}, {"id": "b8d61dc56a4112e0317c6a7323417ee649476148", "title": "Cross Pixel Optical Flow Similarity for Self-Supervised Learning", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.05636.pdf"}, {"id": "db0a4af734dab1854c2e8dfe499fe0e353226e45", "title": "Hot Anchors: A Heuristic Anchors Sampling Method in RCNN-Based Object Detection", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/db0a/4af734dab1854c2e8dfe499fe0e353226e45.pdf"}, {"id": "ffe0f43206169deef3a2bf64cec90fe35bb1a8e5", "title": "Automated Processing of Imaging Data through Multi-tiered Classification of Biological Structures Illustrated Using Caenorhabditis elegans\n", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ffe0/f43206169deef3a2bf64cec90fe35bb1a8e5.pdf"}, {"id": "479eb6579194d4d944671dfe5e90b122ca4b58fd", "title": "Structural inference embedded adversarial networks for scene parsing", "addresses": [{"address": "Harbin Engineering University", "lat": "45.77445695", "lng": "126.67684917", "type": "edu"}, {"address": "Northwestern Polytechnical University", "lat": "34.24691520", "lng": "108.91061982", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/479e/b6579194d4d944671dfe5e90b122ca4b58fd.pdf"}, {"id": "d289ce63055c10937e5715e940a4bb9d0af7a8c5", "title": "DeepMon: Mobile GPU-based Deep Learning Framework for Continuous Vision Applications", "addresses": [{"address": "Singapore Management University", "lat": "1.29500195", "lng": "103.84909214", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3081360"}, {"id": "061bba574c7c2ef0ba9de91afc4fcab70feddd4f", "title": "Paying Attention to Descriptions Generated by Image Captioning Models", "addresses": [{"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}, {"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.272"}, {"id": "ee2217f9d22d6a18aaf97f05768035c38305d1fa", "title": "Detection of facial parts via deformable part model using part annotation", "addresses": [{"address": "Kobe University", "lat": "34.72757140", "lng": "135.23710000", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/APSIPA.2015.7415501"}, {"id": "18219d85bb14f851fc4714df19cc7f38dff8ddc3", "title": "Online Adaptation of Convolutional Neural Networks for the 2017 DAVIS Challenge on Video Object Segmentation", "addresses": [{"address": "RWTH Aachen University", "lat": "50.77917030", "lng": 
"6.06728733", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/1821/9d85bb14f851fc4714df19cc7f38dff8ddc3.pdf"}, {"id": "da44881db32c132eb9cdef524618e3c8ed340b47", "title": "Annotation-Free and One-Shot Learning for Instance Segmentation of Homogeneous Object Clusters", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.00383.pdf"}, {"id": "cc94b423c298003f0f164e63e63177d443291a77", "title": "Multi-View Semantic Labeling of 3D Point Clouds for Automated Plant Phenotyping", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.03994.pdf"}, {"id": "83a811fd947415df2413d15386dbc558f07595cb", "title": "Fine-grained Discriminative Localization via Saliency-guided Faster R-CNN", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.08295.pdf"}, {"id": "3a5f5aca6138abcf22ede1af5572e01eb0f761d1", "title": "Optimizing Multivariate Performance Measures from Multi-View Data", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/3a5f/5aca6138abcf22ede1af5572e01eb0f761d1.pdf"}, {"id": "ce300b006f42c1b64ca0e53d1cf28d11a98ece8f", "title": "Learning Multi-Instance Enriched Image Representations via Non-Greedy Ratio Maximization of the l 1-Norm Distances", "addresses": [{"address": "Northwestern Polytechnical University", "lat": "34.24691520", "lng": "108.91061982", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/ce30/0b006f42c1b64ca0e53d1cf28d11a98ece8f.pdf"}, {"id": "71b038958df0b7855fc7b8b8e7dcde8537a7c1ad", "title": "Kernel Methods for Unsupervised Domain Adaptation by Boqing Gong", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/71b0/38958df0b7855fc7b8b8e7dcde8537a7c1ad.pdf"}, {"id": "af7cab9b4a2a2a565a3efe0a226c517f47289077", "title": "Deep Unsupervised Saliency Detection: A Multiple Noisy Labeling Perspective", "addresses": [{"address": "Northwestern Polytechnical University", "lat": "34.24691520", "lng": "108.91061982", "type": "edu"}, {"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.10910.pdf"}, {"id": "3a6ebdfb6375093885e846153a48139ef1ecfae6", "title": "The treasure beneath convolutional layers: Cross-convolutional-layer pooling for image classification", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/abs/1411.7466"}, {"id": "a7e9d230bc44dfbe56757f3025d5b4caa49032f3", "title": "Unity in Diversity: Discovering Topics from Words - Information Theoretic Co-clustering for Visual Categorization", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a7e9/d230bc44dfbe56757f3025d5b4caa49032f3.pdf"}, {"id": "50137d663802224e683951c48970496b38b02141", "title": "DETRAC: A New Benchmark and Protocol for Multi-Object Tracking", "addresses": [{"address": "Hanyang 
University", "lat": "37.55572710", "lng": "127.04366420", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5013/7d663802224e683951c48970496b38b02141.pdf"}, {"id": "07de8371ad4901356145722aa29abaeafd0986b9", "title": "Towards Usable Multimedia Event Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/07de/8371ad4901356145722aa29abaeafd0986b9.pdf"}, {"id": "af386bb1b5e8c9f65b3ae836198a93aa860d6331", "title": "Revisiting Dilated Convolution: A Simple Approach for Weakly- and Semi- Supervised Semantic Segmentation", "addresses": [{"address": "IBM Thomas J. Watson Research Center", "lat": "41.21002475", "lng": "-73.80407056", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04574.pdf"}, {"id": "d6b1b0e60e1764982ef95d4ade8fcaa10bfb156a", "title": "A Sketch-based Approach for Multimedia Retrieval", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d6b1/b0e60e1764982ef95d4ade8fcaa10bfb156a.pdf"}, {"id": "37b3637dab65b91a5c91bb6a583e69c448823cc1", "title": "Learning a Hierarchical Latent-Variable Model of 3D Shapes", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1705.05994.pdf"}, {"id": "83d16fb8f53156c9e2b28d75abb6532af515440f", "title": "Large-scale Document Labeling using Supervised Sequence Embedding", "addresses": [{"address": "Drexel University", "lat": "39.95740000", "lng": "-75.19026706", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/83d1/6fb8f53156c9e2b28d75abb6532af515440f.pdf"}, {"id": "05e45f61dc7577c50114a382abc6e952ae24cdac", "title": "Object Detection and Recognition in Natural Settings by George William Dittmar A thesis submitted in partial fulfilment of the requirements of the degree Master of Science in Computer Science Thesis Committee: Melanie Mitchell, Chair", "addresses": [{"address": "Portland State University", "lat": "45.51181205", "lng": "-122.68492999", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/05e4/5f61dc7577c50114a382abc6e952ae24cdac.pdf"}, {"id": "192235f5a9e4c9d6a28ec0d333e36f294b32f764", "title": "Reconfiguring the Imaging Pipeline for Computer Vision", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf"}, {"id": "bd4f2e7a196c0d6033a49390ee8836f4f551b7c8", "title": "ICDAR 2015 competition on Robust Reading", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}, {"address": "Kyushu University", "lat": "33.59914655", "lng": "130.22359848", "type": "edu"}], "year": 2015, "pdf": "http://rrc.cvc.uab.es/files/Robust-Reading-Competition-Karatzas.pdf"}, {"id": "3d5575e9ba02128d94c20330f4525fc816411ec2", "title": "Learning Video Object Segmentation from Static Images", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1612.02646.pdf"}, {"id": 
"78f62042bfb3bb49ba10e142d118a9bb058b2a19", "title": "WebSeg: Learning Semantic Segmentation from Web Searches", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/78f6/2042bfb3bb49ba10e142d118a9bb058b2a19.pdf"}, {"id": "0c7aac75ccd17d696cff2e1ce95db0493f5c18a2", "title": "VideoMatch: Matching Based Video Object Segmentation", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.01123.pdf"}, {"id": "6c78add400f749c897dc3eb93996eda1c796e91c", "title": "Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding", "addresses": [{"address": "University of Malaya", "lat": "3.12267405", "lng": "101.65356103", "type": "edu"}, {"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}], "year": "2014", "pdf": "https://arxiv.org/pdf/1410.3752.pdf"}, {"id": "b61c0b11b1c25958d202b4f7ca772e1d95ee1037", "title": "Bridging Category-level and Instance-level Semantic Image Segmentation", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b61c/0b11b1c25958d202b4f7ca772e1d95ee1037.pdf"}, {"id": "79894ddf290d3c7a768d634eceb7888564b5cf19", "title": "Query-Guided Regression Network with Context Policy for Phrase Grounding", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.01676.pdf"}, {"id": "fec2a5a06a3aab5efe923a78d208ec747d5e4894", "title": "Generalizing to Unseen Domains via Adversarial Data Augmentation", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.12018.pdf"}, {"id": "5ac63895a7d3371a739d066bb1631fc178d8276a", "title": "Learning Semantic Feature Map for Visual Content Recognition", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3123266.3123379"}, {"id": "4e559f23bcf502c752f2938ad7f0182047b8d1e4", "title": "A Fast Approximate AIB Algorithm for Distributional Word Clustering", "addresses": [{"address": "University of Wollongong", "lat": "-34.40505545", "lng": "150.87834655", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Wang_A_Fast_Approximate_2013_CVPR_paper.pdf"}, {"id": "7536b6a9f3cb4ae810e2ef6d0219134b4e546dd0", "title": "Semi-Automatic Image Labelling Using Depth Information", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7536/b6a9f3cb4ae810e2ef6d0219134b4e546dd0.pdf"}, {"id": "11b89011298e193d9e6a1d99302221c1d8645bda", "title": "Structured Feature Selection", "addresses": [{"address": "Rensselaer Polytechnic Institute", "lat": "42.72984590", "lng": "-73.67950216", "type": "edu"}], "year": 2015, "pdf": 
"http://openaccess.thecvf.com/content_iccv_2015/papers/Gao_Structured_Feature_Selection_ICCV_2015_paper.pdf"}, {"id": "de3245c795bc50ebdb5d929c8da664341238264a", "title": "Generative Model With Coordinate Metric Learning for Object Recognition Based on 3D Models", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1705.08590.pdf"}, {"id": "cc2eaa182f33defbb33d69e9547630aab7ed9c9c", "title": "Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}, {"address": "Ohio State University", "lat": "40.00471095", "lng": "-83.02859368", "type": "edu"}, {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf"}, {"id": "9c71e6f4e27b3a6f0f872ec683b0f6dfe0966c05", "title": "Latent Dirichlet Allocation (LDA) and Topic modeling: models, applications, a survey", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9c71/e6f4e27b3a6f0f872ec683b0f6dfe0966c05.pdf"}, {"id": "b88b83d2ffd30bf3bc3be3fb7492fd88f633b2fe", "title": "Subcategory-Aware Object Classification", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2013, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a827.pdf"}, {"id": "b6a3802075d460093977f8566c451f950edf7a47", "title": "Facilitating and Exploring Planar Homogeneous Texture for Indoor Scene Understanding", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/0999/e5baf505eed0df8e2661c29354f3757b3399.pdf"}, {"id": "cd6cab9357f333ad9966abc76f830c190a1b7911", "title": "Recognition, reorganisation, reconstruction and reinteraction for scene understanding", "addresses": [{"address": "Oxford Brookes University", "lat": "51.75552050", "lng": "-1.22615970", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/cd6c/ab9357f333ad9966abc76f830c190a1b7911.pdf"}, {"id": "0fe8b5503681128da84a8454a4cc94470adc09ea", "title": "Sparsity Potentials for Detecting Objects with the Hough Transform", "addresses": [{"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}, {"address": "Sharif University of Technology", "lat": "35.70362270", "lng": "51.35125097", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b96a/0ccae1d15cffe3b479b2c56d9132b05cd846.pdf"}, {"id": "9bbc952adb3e3c6091d45d800e806d3373a52bac", "title": "Learning Visual Classifiers using Human-centric Annotations", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/9bbc/952adb3e3c6091d45d800e806d3373a52bac.pdf"}, {"id": "6e209d7d33c0be8afae863f4e4e9c3e86826711f", "title": "Weakly-supervised segmentation by combining CNN feature maps and object saliency maps", "addresses": [{"address": "University of Electro-Communications", "lat": "35.65729570", "lng": "139.54255868", 
"type": "edu"}], "year": 2016, "pdf": "http://img.cs.uec.ac.jp/pub/conf16/161204shimok_1_ppt.pdf"}, {"id": "46d85e1dc7057bef62647bd9241601e9896a1b02", "title": "Improving object proposals with multi-thresholding straddling expansion", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_040_ext.pdf"}, {"id": "67e3fac91c699c085d47774990572d8ccdc36f15", "title": "Multiple Skip Connections and Dilated Convolutions for Semantic Segmentation", "addresses": [{"address": "Chubu University", "lat": "35.27426550", "lng": "137.01327841", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/67e3/fac91c699c085d47774990572d8ccdc36f15.pdf"}, {"id": "a4f29217d2120ed1490aea7e1c5b78c3b76e972f", "title": "Enhanced object detection via fusion with prior beliefs from image classification", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1610.06907.pdf"}, {"id": "f2d07a77711a8d74bbfa48a0436dae18a698b05a", "title": "Composite Statistical Learning and Inference for Semantic Segmentation", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "University of Coimbra", "lat": "40.20759510", "lng": "-8.42566148", "type": "edu"}, {"address": "Lund University", "lat": "55.70395710", "lng": "13.19020110", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f2d0/7a77711a8d74bbfa48a0436dae18a698b05a.pdf"}, {"id": "ff11cb09e409996020a2dc3a8afc3b535e6b2482", "title": "Faster Bounding Box Annotation for Object Detection in Indoor Scenes", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.03142.pdf"}, {"id": "e103fa24d7fa297cd206b22b3bf670bfda6c65c4", "title": "Object Detection in Very High-Resolution Aerial Images Using One-Stage Densely Connected Feature Pyramid Network", "addresses": [{"address": "Chonbuk National University", "lat": "35.84658875", "lng": "127.13501330", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e103/fa24d7fa297cd206b22b3bf670bfda6c65c4.pdf"}, {"id": "9a781a01b5a9c210dd2d27db8b73b7d62bc64837", "title": "An Attempt to Build Object Detection Models by Reusing Parts", "addresses": [{"address": "Brown University", "lat": "41.82686820", "lng": "-71.40123146", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9a78/1a01b5a9c210dd2d27db8b73b7d62bc64837.pdf"}, {"id": "ac559888f996923c06b1cf90db6b57b12e582289", "title": "Benchmarking neuromorphic vision: lessons learnt from computer vision", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ac55/9888f996923c06b1cf90db6b57b12e582289.pdf"}, {"id": "2a4fc35acaf09517e9c63821cadd428a84832416", "title": "Learning object class detectors from weakly annotated video", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2012, "pdf": 
"http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00905.pdf"}, {"id": "cd4850de71e4e858be5f5e6ef7f48d5bf7decea6", "title": "Distribution Entropy Boosted VLAD for Image Retrieval", "addresses": [{"address": "Jilin University", "lat": "22.05356500", "lng": "113.39913285", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/cd48/50de71e4e858be5f5e6ef7f48d5bf7decea6.pdf"}, {"id": "34b925a111ba29f73f5c0d1b363f357958d563c1", "title": "SAPPHIRE: An always-on context-aware computer vision system for portable devices", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}, {"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2015, "pdf": "https://www.microsoft.com/en-us/research/wp-content/uploads/2015/03/Shoaib_DATE_2015.pdf"}, {"id": "c76b611a986a2e09df22603d93b2d9125aaff369", "title": "Generating Self-Guided Dense Annotations for Weakly Supervised Semantic Segmentation", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07050.pdf"}, {"id": "1927d01b6b9acf865401b544e25b62a7ddbac5fa", "title": "An Enhanced Region Proposal Network for object detection using deep learning method", "addresses": [{"address": "Jilin University", "lat": "22.05356500", "lng": "113.39913285", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1927/d01b6b9acf865401b544e25b62a7ddbac5fa.pdf"}, {"id": "1ecd20f7fc34344e396825d27bc5a9871ab0d0c2", "title": "SG-One: Similarity Guidance Network for One-Shot Semantic Segmentation", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.09091.pdf"}, {"id": "26aa0aff1ea1baf848a521363cc455044690e090", "title": "A 2D + 3D Rich Data Approach to Scene Understanding", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/26aa/0aff1ea1baf848a521363cc455044690e090.pdf"}, {"id": "3548cb9ee54bd4c8b3421f1edd393da9038da293", "title": "(Unseen) event recognition via semantic compositionality", "addresses": [{"address": "University of Trento", "lat": "46.06588360", "lng": "11.11598940", "type": "edu"}], "year": 2012, "pdf": "http://www.huppelen.nl/publications/2012cvprUnseenEventCompositionality.pdf"}, {"id": "25ee08db14dca641d085584909b551042618b8bf", "title": "Learning to Segment Instances in Videos with Spatial Propagation Network", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of California, Merced", "lat": "37.36566745", "lng": "-120.42158888", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/25ee/08db14dca641d085584909b551042618b8bf.pdf"}, {"id": "d0f81c31e11af1783644704321903a3d2bd83fd6", "title": "3D Fa\u00e7ade Labeling over Complex Scenarios: A Case Study Using Convolutional Neural Network and Structure-From-Motion", "addresses": [{"address": "University of Stuttgart", "lat": "48.90953380", "lng": "9.18318920", "type": "edu"}, {"address": "University of Exeter", "lat": "50.73693020", "lng": "-3.53647672", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d0f8/1c31e11af1783644704321903a3d2bd83fd6.pdf"}, {"id": "a996f22a2d0c685f7e4972df9f45e99efc3cbb76", "title": 
"Towards the Success Rate of One: Real-Time Unconstrained Salient Object Detection", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1708.00079.pdf"}, {"id": "4da5f0c1d07725a06c6b4a2646e31ea3a5f14435", "title": "End-to-End Training of Hybrid CNN-CRF Models for Semantic Segmentation using Structured Learning", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4da5/f0c1d07725a06c6b4a2646e31ea3a5f14435.pdf"}, {"id": "26c58e24687ccbe9737e41837aab74e4a499d259", "title": "Codemaps - Segment, Classify and Search Objects Locally", "addresses": [{"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Li_Codemaps_-_Segment_2013_ICCV_paper.pdf"}, {"id": "299b65d5d3914dad9aae2f936165dcebcf78db88", "title": "Weakly-and Semi-Supervised Learning of a Deep Convolutional Network for Semantic Image Segmentation", "addresses": [{"address": "Google", "lat": "37.42199990", "lng": "-122.08405750", "type": "company"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.203"}, {"id": "cb5dcd048b0eaa78a887a014be26a8a7b1325d36", "title": "Joint Learning of Set Cardinality and State Distribution", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1709.04093.pdf"}, {"id": "63660c50e2669a5115c2379e622549d8ed79be00", "title": "Deep Salient Object Detection by Integrating Multi-level Cues", "addresses": [{"address": "Northwestern Polytechnical University", "lat": "34.24691520", "lng": "108.91061982", "type": "edu"}, {"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2017, "pdf": "http://porikli.com/mysite/pdfs/porikli%202017%20-%20Deep%20salient%20object%20detection%20by%20integrating%20multi-level%20cues.pdf"}, {"id": "472541ccd941b9b4c52e1f088cc1152de9b3430f", "title": "Learning in an Uncertain World: Representing Ambiguity Through Multiple Hypotheses", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1612.00197.pdf"}, {"id": "9184b0c04013bfdfd82f4f271b5f017396c2f085", "title": "Semantic Segmentation for Line Drawing Vectorization Using Neural Networks", "addresses": [{"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/9184/b0c04013bfdfd82f4f271b5f017396c2f085.pdf"}, {"id": "57488aa24092fa7118aa5374c90b282a32473cf9", "title": "A Weakly Supervised Adaptive DenseNet for Classifying Thoracic Diseases and Identifying Abnormalities", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Pennsylvania", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.01257.pdf"}, {"id": "7771807cd05f78a4591f2d0b094ddd3e0bd5339a", "title": "Adaptive Feeding: Achieving Fast and Accurate Detections by Adaptively Combining Object Detectors", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": 
"118.77408833", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.06399.pdf"}, {"id": "4558338873556d01fd290de6ddc55721c633a1ad", "title": "Training Constrained Deconvolutional Networks for Road Scene Semantic Segmentation", "addresses": [{"address": "Cambridge University", "lat": "50.79440260", "lng": "-1.09717480", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4558/338873556d01fd290de6ddc55721c633a1ad.pdf"}, {"id": "85957b49896246bb416c0a182e52b355a8fa40b4", "title": "Feature Pyramid Network for Multi-Class Land Segmentation", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03510.pdf"}, {"id": "f5eb411217f729ad7ae84bfd4aeb3dedb850206a", "title": "Tackling Low Resolution for Better Scene Understanding", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf"}, {"id": "7fb8d9c36c23f274f2dd84945dd32ec2cc143de1", "title": "Semantic Segmentation with Second-Order Pooling", "addresses": [{"address": "Institute of Systems and Robotics", "lat": "53.83383710", "lng": "10.70359390", "type": "edu"}, {"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8e44/ba779d7cdc23d597c2c6e4420129834e7e21.pdf"}, {"id": "b5e3beb791cc17cdaf131d5cca6ceb796226d832", "title": "Novel Dataset for Fine-Grained Image Categorization: Stanford Dogs", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b5e3/beb791cc17cdaf131d5cca6ceb796226d832.pdf"}, {"id": "b5968e7bb23f5f03213178c22fd2e47af3afa04c", "title": "Multiple-Human Parsing in the Wild", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07206.pdf"}, {"id": "532c089b43983935e1001c5e35aa35440263beaf", "title": "G-Distillation: Reducing Overconfident Errors on Novel Samples", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03166.pdf"}, {"id": "35fc0b28d0d674b28dd625d170bc641a36b17318", "title": "CSI: Composite Statistical Inference Techniques for Semantic Segmentation", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Lund University", "lat": "55.70395710", "lng": "13.19020110", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/35fc/0b28d0d674b28dd625d170bc641a36b17318.pdf"}, {"id": "e4cb27d2a3e1153cb517d97d61de48ff0483c988", "title": "Viktoria Plemakova Vehicle Detection Based on Convolutional Neural Networks", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e4cb/27d2a3e1153cb517d97d61de48ff0483c988.pdf"}, {"id": "3d0660e18c17db305b9764bb86b21a429241309e", "title": "Counting Everyday Objects in Everyday Scenes", "addresses": [{"address": "Georgia Institute of 
Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1604.03505.pdf"}, {"id": "a67da2dd79c01e8cc4029ecc5a05b97967403862", "title": "On Selecting Helpful Unlabeled Data for Improving Semi-Supervised Support Vector Machines", "addresses": [{"address": "Myongji University", "lat": "37.23810230", "lng": "127.19034310", "type": "edu"}], "year": "2014", "pdf": "https://pdfs.semanticscholar.org/a67d/a2dd79c01e8cc4029ecc5a05b97967403862.pdf"}, {"id": "4ab69672e1116427d685bf7c1edb5b1fd0573b5e", "title": "Spatial pooling of heterogeneous features for image applications", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2012, "pdf": "http://bigml.cs.tsinghua.edu.cn/~lingxi/PDFs/Xie_ACMMM12_EdgeGPP.pdf"}, {"id": "989c7cdafa9b90ab2ea0a9d8fa60634cc698f174", "title": "YoloFlow Real - time Object Tracking in Video CS 229 Course Project", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/989c/7cdafa9b90ab2ea0a9d8fa60634cc698f174.pdf"}, {"id": "85af6c005df806b57b306a732dcb98e096d15bfb", "title": "Getting to Know Low-light Images with The Exclusively Dark Dataset", "addresses": [{"address": "University of Malaya", "lat": "3.12267405", "lng": "101.65356103", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11227.pdf"}, {"id": "cdb293381ff396d6e9c0f5e9578d411e759347fd", "title": "3 DR 2 N 2 : A Unified Approach for Single and Multiview 3 D Object Reconstruction", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/022e/eae0edc09deb228da26d5390874f781ace0f.pdf"}, {"id": "0e67717484684d90ae9d4e1bb9cdceb74b194910", "title": "Mining Pixels: Weakly Supervised Semantic Segmentation Using Image Labels", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0e67/717484684d90ae9d4e1bb9cdceb74b194910.pdf"}, {"id": "5b4b84ce3518c8a14f57f5f95a1d07fb60e58223", "title": "Diagnosing Error in Object Detectors", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/9f92/05a60ddf1135929e0747db34363b3a8c6bc8.pdf"}, {"id": "47203943c86e4d9355ffd99cd3d75f37211fd805", "title": "Semi-Crowdsourced Clustering: Generalizing Crowd Labeling by Robust Distance Metric Learning", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "GE Global Research Center", "lat": "42.82982480", "lng": "-73.87719385", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/be18/9c7066c4d99d617d137c975139c594ad09af.pdf"}, {"id": "45ff38add61df32a027048624f58952a67a7c5f5", "title": "Deep Context Convolutional Neural Networks for Semantic Segmentation", "addresses": [{"address": "Temple University", "lat": "39.95472495", "lng": "-75.15346905", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/45ff/38add61df32a027048624f58952a67a7c5f5.pdf"}, {"id": "0a789733ccb300d0dd9df6174faaa7e8c64e0409", "title": "High-Resolution Multispectral Dataset for Semantic Segmentation", "addresses": [{"address": "Rochester Institute of 
Technology", "lat": "43.08250655", "lng": "-77.67121663", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0a78/9733ccb300d0dd9df6174faaa7e8c64e0409.pdf"}, {"id": "9d3a6e459e0cecda20a8afd69d182877ff0224cf", "title": "A Framework for Articulated Hand Pose Estimation and Evaluation", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9d3a/6e459e0cecda20a8afd69d182877ff0224cf.pdf"}, {"id": "943a1e218b917172199e524944006aa349f58968", "title": "Joint Learning of Intrinsic Images and Semantic Segmentation", "addresses": [{"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11857.pdf"}, {"id": "5f68e2131d9275d56092e9fca05bcfc65abea0d8", "title": "Cross-Modal Similarity Learning: A Low Rank Bilinear Formulation", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://doi.acm.org/10.1145/2806416.2806469"}, {"id": "f989a20fbcc2d576c0c4514a0e5085c741580778", "title": "Co-localization with Category-Consistent Features and Geodesic Distance Propagation", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}, {"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1612.03236.pdf"}, {"id": "cf94200a476dc15d6da95db809349db4cfd8e92c", "title": "Leveraging Motion Priors in Videos for Improving Human Segmentation", "addresses": [{"address": "National Tsing Hua University", "lat": "24.79254840", "lng": "120.99511830", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11436.pdf"}, {"id": "25dba68e4db0ce361032126b91f734f9252cae7c", "title": "DeepSetNet: Predicting Sets with Deep Neural Networks", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.08998.pdf"}, {"id": "883767948f535ea2bf8a0c03047ca9064e1b078f", "title": "A Combination of Object Recognition and Localisation for an Autonomous Racecar", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/8837/67948f535ea2bf8a0c03047ca9064e1b078f.pdf"}, {"id": "18095a530b532a70f3b615fef2f59e6fdacb2d84", "title": "Deep Structured Scene Parsing by Learning with Image Descriptions", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2016, "pdf": "https://arxiv.org/pdf/1604.02271v3.pdf"}, {"id": "9397e7acd062245d37350f5c05faf56e9cfae0d6", "title": "DeepFruits: A Fruit Detection System Using Deep Neural Networks", "addresses": [{"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/9397/e7acd062245d37350f5c05faf56e9cfae0d6.pdf"}, {"id": "03a24d15533dae78de78fd9d5f6c9050fb97f186", "title": "Pedestrian detection aided by scale-discriminative network", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": 
"edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/SSCI.2016.7850112"}, {"id": "17d4fd92352baf6f0039ec64d43ca572c8252384", "title": "MoE-SPNet: A mixture-of-experts scene parsing network", "addresses": [{"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.07049.pdf"}, {"id": "30a29f6c407749e97bc7c2db5674a62773af9d27", "title": "Tracking and Visual Quality Inspection in Harsh Environments (print-version)", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/30a2/9f6c407749e97bc7c2db5674a62773af9d27.pdf"}, {"id": "280d632ef3234c5ab06018c6eaccead75bc173b3", "title": "Efficient Image and Video Co-localization with Frank-Wolfe Algorithm", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6b1a/c8e438041ac02cc8fab5762ca069c386f473.pdf"}, {"id": "0f945f796a9343b51a3dc69941c0fa1a98c0f448", "title": "Local Hypersphere Coding Based on Edges between Visual Words", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a7ef/979ce52b9e4bcbd6ee5524dfd4e92baf6292.pdf"}, {"id": "0db6a58927a671c01089c53248b0e1c36bdc3231", "title": "Efficient Point Process Inference for Large-Scale Object Detection", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/papers/Pham_Efficient_Point_Process_CVPR_2016_paper.pdf"}, {"id": "14d0afea52c4e9b7a488f6398e4a92bd4f4b93c7", "title": "Rethinking the Faster R-CNN Architecture for Temporal Action Localization", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.07667.pdf"}, {"id": "8da1b0834688edb311a803532e33939e9ecf8292", "title": "CornerNet: Detecting Objects as Paired Keypoints", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.01244.pdf"}, {"id": "f42d3225afd9e463ddb7a355f64b54af8bd14227", "title": "Stacked U-Nets: A No-Frills Approach to Natural Image Segmentation", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10343.pdf"}, {"id": "a1dd88f44d045b360569a9a8721f728afbd951c3", "title": "Relief Impression Image Detection : Unsupervised Extracting Objects Directly From Feature Arrangements of Deep CNN", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a1dd/88f44d045b360569a9a8721f728afbd951c3.pdf"}, {"id": "fc027fccb19512a439fc17181c34ee1c3aad51b5", "title": "Joint Multi-person Pose Estimation and Semantic Part Segmentation", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.03383.pdf"}, {"id": "377f2b65e6a9300448bdccf678cde59449ecd337", "title": "Pushing the Limits 
of Unconstrained Face Detection: a Challenge Dataset and Baseline Results", "addresses": [{"address": "Johns Hopkins University", "lat": "39.32905300", "lng": "-76.61942500", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10275.pdf"}, {"id": "451eed7fd8ae281d1cc76ca8cdecbaf47816e55a", "title": "Close Yet Distinctive Domain Adaptation", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/451e/ed7fd8ae281d1cc76ca8cdecbaf47816e55a.pdf"}, {"id": "992b93ab9d016640551a8cebcaf4757288154f32", "title": "Nested Pictorial Structures", "addresses": [{"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/e38c/f96363aaf1f17c487c484ad27d3175ca4b31.pdf"}, {"id": "7489990ea3d6ab4c1c86c9ed9f049399961dfaef", "title": "Normalized cutswith soft must-link constraints for image segmentation and clustering", "addresses": [{"address": "Rochester Institute of Technology", "lat": "43.08250655", "lng": "-77.67121663", "type": "edu"}], "year": 2014, "pdf": "https://people.rit.edu/ndcsma/pubs/WNYISPW_Nov_2014_Chew.pdf"}, {"id": "41199678ad9370ff8ca7e9e3c2617b62a297fac3", "title": "Multitask Deep Learning models for real-time deployment in embedded systems", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4119/9678ad9370ff8ca7e9e3c2617b62a297fac3.pdf"}, {"id": "7fb74f5abab4830e3cdaf477230e5571d9e3ca57", "title": "Polyhedral Conic Classifiers for Visual Object Detection and Classification", "addresses": [{"address": "Eskisehir Osmangazi University", "lat": "39.74875160", "lng": "30.47653071", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017/papers/Cevikalp_Polyhedral_Conic_Classifiers_CVPR_2017_paper.pdf"}, {"id": "10793d1475607929fedc6d9a677911ad16843e58", "title": "Unsupervised Learning of Edges", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/papers/Li_Unsupervised_Learning_of_CVPR_2016_paper.pdf"}, {"id": "c94fd258a8f1e8f4033a7fe491f1372dcf7d3cd6", "title": "TS ^2 2 C: Tight Box Mining with Surrounding Segmentation Context for Weakly Supervised Object Detection", "addresses": [{"address": "Fudan University", "lat": "31.30104395", "lng": "121.50045497", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.04897.pdf"}, {"id": "2581a12189eb1a0b5b27a7fd1c2cbe44c88fcc20", "title": "Analyzing Classifiers: Fisher Vectors and Deep Neural Networks", "addresses": [{"address": "TU Berlin", "lat": "52.51806410", "lng": "13.32504250", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1512.00172v1.pdf"}, {"id": "96416b1b44fb05302c6e9a8ab1b74d9204995e73", "title": "Learning Effective Binary Visual Representations with Deep Networks", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9641/6b1b44fb05302c6e9a8ab1b74d9204995e73.pdf"}, {"id": 
"aa2ddae22760249729ac2c2c4e24c8b665bcd40e", "title": "Interpretable Basis Decomposition for Visual Explanation", "addresses": [{"address": "MIT CSAIL", "lat": "42.36194070", "lng": "-71.09043780", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/8c47/635ae7f1641c2bdd45026ad7dbff70c24398.pdf"}, {"id": "60542b1a857024c79db8b5b03db6e79f74ec8f9f", "title": "Learning to Detect Human-Object Interactions", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1702.05448.pdf"}, {"id": "bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d", "title": "Toward Scene Recognition by Discovering Semantic Structures and Parts", "addresses": [{"address": "Shandong University", "lat": "36.36934730", "lng": "120.67381800", "type": "edu"}, {"address": "Simon Fraser University", "lat": "49.27674540", "lng": "-122.91777375", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bd8a/85acaa45d4068fca584e8d9e3bd3bb4eea4d.pdf"}, {"id": "456abee9c8d31f004b2f0a3b47222043e20f5042", "title": "Unsupervised Visual Sense Disambiguation for Verbs using Multimodal Embeddings", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1603.09188.pdf"}, {"id": "7c2f6424b0bb2c28f282fbc0b4e98bf85d5584eb", "title": "Relief R-CNN: Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}, {"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/a5ae/7d662ed086bc5b0c9a2c1dc54fcb23635000.pdf"}, {"id": "59e9934720baf3c5df3a0e1e988202856e1f83ce", "title": "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking", "addresses": [{"address": "Hanyang University", "lat": "37.55572710", "lng": "127.04366420", "type": "edu"}], "year": "2015", "pdf": "https://arxiv.org/pdf/1511.04136.pdf"}, {"id": "d58c44bd9b464d9ac1db1344445c31364925f75a", "title": "TBN: Convolutional Neural Network with Ternary Inputs and Binary Weights", "addresses": [{"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d58c/44bd9b464d9ac1db1344445c31364925f75a.pdf"}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}, {"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.09078.pdf"}, {"id": "0b6f64c78c44dc043e2972fa7bfe2a5753768609", "title": "A future for learning semantic models of man-made environments", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7900008"}, {"id": "016eb7b32d1fdec0899151fb03799378bf59bbe5", "title": "Point Linking Network for Object Detection", "addresses": [{"address": "Huazhong University of Science and Technology", "lat": "30.50975370", "lng": "114.40628810", 
"type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/016e/b7b32d1fdec0899151fb03799378bf59bbe5.pdf"}, {"id": "cd9d654c6a4250e0cf8bcfddc2afab9e70ee6cae", "title": "Object Detection with Mask-based Feature Encoding", "addresses": [{"address": "University of South Carolina", "lat": "33.99282980", "lng": "-81.02685168", "type": "edu"}, {"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/cd9d/654c6a4250e0cf8bcfddc2afab9e70ee6cae.pdf"}, {"id": "28737575297a20d431dd2b777a79a8be2c9c2bbd", "title": "Object Ranking on Deformable Part Models with Bagged LambdaMART", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2873/7575297a20d431dd2b777a79a8be2c9c2bbd.pdf"}, {"id": "46702e0127e16a4d6a1feda3ffc5f0f123957e87", "title": "Revisit Multinomial Logistic Regression in Deep Learning: Data Dependent Model Initialization for Image Recognition", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.06131.pdf"}, {"id": "d2b2cb1d5cc1aa30cf5be7bcb0494198934caabb", "title": "A Restricted Visual Turing Test for Deep Scene and Event Understanding", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d2b2/cb1d5cc1aa30cf5be7bcb0494198934caabb.pdf"}, {"id": "446fbff6a2a7c9989b0a0465f960e236d9a5e886", "title": "Context Encoders: Feature Learning by Inpainting", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pathak_Context_Encoders_Feature_CVPR_2016_paper.pdf"}, {"id": "291e5377df2eec4835b5c6889896941831a11c69", "title": "Recovering 6D Object Pose: Multi-modal Analyses on Challenges", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/291e/5377df2eec4835b5c6889896941831a11c69.pdf"}, {"id": "b69fbf046faf685655b5fa52fef07fb77e75eff4", "title": "Modeling guidance and recognition in categorical search: bridging human and computer object detection.", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/b69f/bf046faf685655b5fa52fef07fb77e75eff4.pdf"}, {"id": "13bda03fc8984d5943ed8d02e49a779d27c84114", "title": "Efficient object detection using cascades of nearest convex model classifiers", "addresses": [{"address": "Eskisehir Osmangazi University", "lat": "39.74875160", "lng": "30.47653071", "type": "edu"}], "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248047"}, {"id": "87a66ccc68374ffb704ee6fb9fa7df369718095c", "title": "Multi-person Pose Estimation with Local Joint-to-Person Associations", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ea90/16fb585ba6449d3d6f98bf85fa0bcd1f4621.pdf"}, {"id": "4960ab1cef23e5ccd60173725ea280f462164a0e", "title": "Video Object Segmentation by 
Learning Location-Sensitive Embeddings", "addresses": [{"address": "Peking University", "lat": "39.99223790", "lng": "116.30393816", "type": "edu"}, {"address": "Microsoft Research Asia", "lat": "39.97721700", "lng": "116.33763200", "type": "company"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4960/ab1cef23e5ccd60173725ea280f462164a0e.pdf"}, {"id": "8856fbf333b2aba7b9f1f746e16a2b7f083ee5b8", "title": "Analyzing animal behavior via classifying each video frame using convolutional neural networks", "addresses": [{"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/8856/fbf333b2aba7b9f1f746e16a2b7f083ee5b8.pdf"}, {"id": "f9f01af981f8d25f0c96ea06d88be62dabb79256", "title": "Terahertz Image Detection with the Improved Faster Region-Based Convolutional Neural Network", "addresses": [{"address": "Xidian University", "lat": "34.12358250", "lng": "108.83546000", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f9f0/1af981f8d25f0c96ea06d88be62dabb79256.pdf"}, {"id": "09066d7d0bb6273bf996c8538d7b34c38ea6a500", "title": "Yes, IoU loss is submodular - as a function of the mispredictions", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.01845.pdf"}, {"id": "4aeebd1c9b4b936ed2e4d988d8d28e27f129e6f1", "title": "See the Difference: Direct Pre-Image Reconstruction and Pose Estimation by Differentiating HOG", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chiu_See_the_Difference_ICCV_2015_paper.pdf"}, {"id": "232ff2dab49cb5a1dae1012fd7ba53382909ec18", "title": "Semantic Video Segmentation from Occlusion Relations within a Convex Optimization Framework", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/232f/f2dab49cb5a1dae1012fd7ba53382909ec18.pdf"}, {"id": "465c34c3334f29de28f973b7702a235509649429", "title": "Stereopsis via deep learning", "addresses": [{"address": "University of Frankfurt", "lat": "50.13053055", "lng": "8.69234224", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/465c/34c3334f29de28f973b7702a235509649429.pdf"}, {"id": "caa2ded6d8d5de97c824d29b0c7a18d220c596c8", "title": "Learning to Segment Breast Biopsy Whole Slide Images", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, {"address": "University of Vermont", "lat": "44.48116865", "lng": "-73.20021790", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1709.02554.pdf"}, {"id": "289d833a35c2156b7e332e67d1cb099fd0683025", "title": "HICO: A Benchmark for Recognizing Human-Object Interactions in Images", "addresses": [{"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chao_HICO_A_Benchmark_ICCV_2015_paper.pdf"}, {"id": "0fbdd4b8eb9e4c4cfbe5b76ab29ab8b0219fbdc0", "title": "Constrained Convolutional Neural Networks for Weakly Supervised Segmentation", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": 
"edu"}], "year": 2015, "pdf": "https://people.eecs.berkeley.edu/~pathak/papers/iccv15.pdf"}, {"id": "f94f79168c1cfaebb8eab5151e01d56478ab0b73", "title": "Optimizing Region Selection for Weakly Supervised Object Detection", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f94f/79168c1cfaebb8eab5151e01d56478ab0b73.pdf"}, {"id": "6bb51f431f348b2b3e1db859827e80f97a576c30", "title": "Irregular Convolutional Neural Networks", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6bb5/1f431f348b2b3e1db859827e80f97a576c30.pdf"}, {"id": "b78e611c32dc0daf762cfa93044558cdb545d857", "title": "Temporal Action Detection with Structured Segment Networks Supplementary Materials", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b78e/611c32dc0daf762cfa93044558cdb545d857.pdf"}, {"id": "bc12715a1ddf1a540dab06bf3ac4f3a32a26b135", "title": "Tracking the Trackers: An Analysis of the State of the Art in Multiple Object Tracking", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}, {"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/bc12/715a1ddf1a540dab06bf3ac4f3a32a26b135.pdf"}, {"id": "4d1757aacbc49c74a5d4e53259c92ab0e47544da", "title": "Weakly and Semi Supervised Human Body Part Parsing via Pose-Guided Knowledge Transfer", "addresses": [{"address": "Shanghai Jiao Tong University", "lat": "31.20081505", "lng": "121.42840681", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04310.pdf"}, {"id": "d392098688a999c70589c995bd4427c212eff69d", "title": "Object Repositioning Based on the Perspective in a Single Image", "addresses": [{"address": "University of Tsukuba", "lat": "36.11120580", "lng": "140.10551760", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/d392/098688a999c70589c995bd4427c212eff69d.pdf"}, {"id": "1c1f21bf136fe2eec412e5f70fd918c27c5ccb0a", "title": "Object Detection and Viewpoint Estimation with Auto-masking Neural Network", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/1c1f/21bf136fe2eec412e5f70fd918c27c5ccb0a.pdf"}, {"id": "72e9acdd64e71fc2084acaf177aafaa2e075bd8c", "title": "The 2017 Hands in the Million Challenge on 3D Hand Pose Estimation", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/72e9/acdd64e71fc2084acaf177aafaa2e075bd8c.pdf"}, {"id": "0209389b8369aaa2a08830ac3b2036d4901ba1f1", "title": "DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1612.01202v2.pdf"}, 
{"id": "07b8a9a225b738c4074a50cf80ee5fe516878421", "title": "Convolutional Simplex Projection Network for Weakly Supervised Semantic Segmentation", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09169.pdf"}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}, {"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1506.04878.pdf"}, {"id": "1bbe0371ca22c2fdb6e0d098049bbf6430324bdb", "title": "Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2906152"}, {"id": "9954f7ee5288724184f9420e39cca9165efa6822", "title": "Estimation of object functions using deformable part model", "addresses": [{"address": "Kobe University", "lat": "34.72757140", "lng": "135.23710000", "type": "edu"}], "year": 2015, "pdf": "http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2015/Th5_4.pdf"}, {"id": "e212b2bc41645fe467a73d004067fcf1ca77d87f", "title": "Deep Active Contours", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e212/b2bc41645fe467a73d004067fcf1ca77d87f.pdf"}, {"id": "51c4ecf4539f56c4b1035b890f743b3a91dd758b", "title": "Situational object boundary detection", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/abs/1504.06434"}, {"id": "007e86cb55f0ba0415a7764a1e9f9566c1e8784b", "title": "Adversarial Feature Learning", "addresses": [{"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2677/3023b17ba560bad6a679930710a9049abca5.pdf"}, {"id": "54d97ea9a5f92761dddd148fb0e602c2293e7c16", "title": "Associating Inter-image Salient Instances for Weakly Supervised Semantic Segmentation", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Cardiff University", "lat": "51.48799610", "lng": "-3.17969747", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/54d9/7ea9a5f92761dddd148fb0e602c2293e7c16.pdf"}, {"id": "0e923b74fd41f73f57e22f66397feeea67e834f0", "title": "Invariant encoding schemes for visual recognition", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/0e92/3b74fd41f73f57e22f66397feeea67e834f0.pdf"}, {"id": "93cba94ff0ff96f865ce24ea01e9c006369d75ff", "title": "Knowledge Aided Consistency for Weakly Supervised Phrase Grounding", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.03879.pdf"}, {"id": "24fc311970e097efc317c0f98d2df37b828bfbad", "title": "Semi-supervised 
hierarchical semantic object parsing", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1709.08019v2.pdf"}, {"id": "5c4d4fd37e8c80ae95c00973531f34a6d810ea3a", "title": "The Open World of Micro-Videos", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1603.09439.pdf"}, {"id": "71b973c87965e4086e75fd2379dd1bd8e3f8231e", "title": "Progressive Attention Networks for Visual Attribute Prediction", "addresses": [{"address": "Seoul National University", "lat": "37.26728000", "lng": "126.98411510", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1606.02393.pdf"}, {"id": "20c02e98602f6adf1cebaba075d45cef50de089f", "title": "Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video Action Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.07507.pdf"}, {"id": "c17ed26650a67e80151f5312fa15b5c423acc797", "title": "Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, {"address": "Institute of Industrial Science", "lat": "36.05238585", "lng": "140.11852361", "type": "edu"}, {"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}, {"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c17e/d26650a67e80151f5312fa15b5c423acc797.pdf"}, {"id": "0ce08f1cc6684495d12c2da157a056c7b88ffcd9", "title": "Multi-Modality Feature Transform: An Interactive Image Segmentation Approach", "addresses": [{"address": "Alexandria University", "lat": "31.21051105", "lng": "29.91314562", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/0ce0/8f1cc6684495d12c2da157a056c7b88ffcd9.pdf"}, {"id": "567078a51ea63b70396dca5dabb50a10a736d991", "title": "Conditional Generative Adversarial Network for Structured Domain Adaptation", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University at Buffalo", "lat": "43.00080930", "lng": "-78.78896970", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1b5a/3bdb174df1ff36c1c101739d6daaec07760d.pdf"}, {"id": "6e4e5ef25f657de8fb383c8dfeb8e229eea28bb9", "title": "RON: Reverse Connection with Objectness Prior Networks for Object Detection", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.01691.pdf"}, {"id": "cf528f9fe6588b71efa94c219979ce111fc9c1c9", "title": "On Evaluation of 6D Object Pose Estimation", "addresses": [{"address": "Czech Technical University", "lat": "50.07642960", "lng": "14.41802312", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/cf52/8f9fe6588b71efa94c219979ce111fc9c1c9.pdf"}, {"id": "3b67645cd512898806aaf1df1811035f2d957f6b", "title": "SCNet: Learning Semantic Correspondence", "addresses": [{"address": "University of Hong Kong", "lat": 
"22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.04043.pdf"}, {"id": "ef2e36daf429899bb48d80ce6804731c3f99bb85", "title": "Debnath, Banerjee, Namboodiri: Adapting Ransac-svm to Detect Outliers for Robust Classification", "addresses": [{"address": "Indian Institute of Technology Kanpur", "lat": "26.51318800", "lng": "80.23651945", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f7bd/b4df0fb5b3ff9fa0ebfe7c2a9ddc34c09a5c.pdf"}, {"id": "79a3a07661b8c6a36070fd767344e15c847a30ef", "title": "Contextual Pooling in Image Classification", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/79a3/a07661b8c6a36070fd767344e15c847a30ef.pdf"}, {"id": "5aa7f33cdc00787284b609aa63f5eb5c0a3212f6", "title": "Multiplicative mixing of object identity and image attributes in single inferior temporal neurons", "addresses": [{"address": "Indian Institute of Science Bangalore", "lat": "13.02223470", "lng": "77.56718325", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/5aa7/f33cdc00787284b609aa63f5eb5c0a3212f6.pdf"}, {"id": "38f88655debf4bf32978a7b39fbd56aea6ee5752", "title": "Class Rectification Hard Mining for Imbalanced Deep Learning", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1712.03162.pdf"}, {"id": "7b3b2912c1d7a70839bc71a150e33f8634d0fff3", "title": "Convolutional Neural Network-Based Embarrassing Situation Detection under Camera for Social Robot in Smart Homes", "addresses": [{"address": "Oklahoma State University", "lat": "36.12447560", "lng": "-97.05004383", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/7b3b/2912c1d7a70839bc71a150e33f8634d0fff3.pdf"}, {"id": "acdc333f7b32d987e65ce15f21db64e850ca9471", "title": "Direct Loss Minimization for Training Deep Neural Nets", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/acdc/333f7b32d987e65ce15f21db64e850ca9471.pdf"}, {"id": "da4137396f26bf3e76d04eeed0c94e11b7824aa6", "title": "Transferable Semi-Supervised Semantic Segmentation", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Beckman Institute", "lat": "40.11571585", "lng": "-88.22750772", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1711.06828.pdf"}, {"id": "5240941af3b263609acaa168f96e1decdb0b3fe4", "title": "Action classification in still images using human eye movements", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W06/papers/Ge_Action_Classification_in_2015_CVPR_paper.pdf"}, {"id": "126250d6077a6a68ae06277352eb42c4fa4c8b10", "title": "Learning Patch-based Structural Element Models with Hierarchical Palettes Abstract Learning Patch-based Structural Element Models with Hierarchical Palettes", "addresses": [{"address": "University of Toronto", 
"lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/1262/50d6077a6a68ae06277352eb42c4fa4c8b10.pdf"}, {"id": "0cbbbfac2fe925479c6b34712e056f840a10fa4d", "title": "Quality Evaluation Methods for Crowdsourced Image Segmentation", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}, {"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/0cbb/bfac2fe925479c6b34712e056f840a10fa4d.pdf"}, {"id": "28df3f11894ce0c48dd8aee65a6ec76d9009cbbd", "title": "Recurrent Flow-Guided Semantic Forecasting", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.08318.pdf"}, {"id": "535ed3850e79ccd51922601546ef0fc48c5fb468", "title": "A feature embedding strategy for high-level CNN representations from multiple convnets", "addresses": [{"address": "University of Windsor", "lat": "42.30791465", "lng": "-83.07176915", "type": "edu"}, {"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1705.04301"}, {"id": "247ca98c5a46616044cf6ae32b0d5b4140a7a161", "title": "High-performance Semantic Segmentation Using Very Deep Fully Convolutional Networks", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/247c/a98c5a46616044cf6ae32b0d5b4140a7a161.pdf"}, {"id": "5f771fed91c8e4b666489ba2384d0705bcf75030", "title": "Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03287.pdf"}, {"id": "d115c4a66d765fef596b0b171febca334cea15b5", "title": "Combining Stacked Denoising Autoencoders and Random Forests for Face Detection", "addresses": [{"address": "Swansea University", "lat": "51.60915780", "lng": "-3.97934429", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d115/c4a66d765fef596b0b171febca334cea15b5.pdf"}, {"id": "e20ab84ac7fa0a5d36d4cf2266b7065c60e1c804", "title": "Stacked U-Nets for Ground Material Segmentation in Remote Sensing Imagery", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/e20a/b84ac7fa0a5d36d4cf2266b7065c60e1c804.pdf"}, {"id": "a1fdf45e6649b0020eb533c70d6062b9183561ff", "title": "Where's YOUR focus: Personalized Attention", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1802.07931.pdf"}, {"id": "775c51b965e8ff37646a265aab64136b4a620526", "title": "Three viewpoints toward exemplar SVM", "addresses": [{"address": "National Institute of Advanced Industrial Science and Technology", "lat": "36.05238585", "lng": "140.11852361", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_059_ext.pdf"}, 
{"id": "0688c0568f3ab418719260d443cc0d86c3af2914", "title": "Curriculum Domain Adaptation for Semantic Segmentation of Urban Scenes", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.09465.pdf"}, {"id": "5d92531e74c4c2cdce91fdcd3c7ff090c8c29504", "title": "Synthesizing Scenes for Instance Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5d92/531e74c4c2cdce91fdcd3c7ff090c8c29504.pdf"}, {"id": "c919a9f61656cdcd3a26076057ee006c48e8f609", "title": "High-Value Target Detection", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c919/a9f61656cdcd3a26076057ee006c48e8f609.pdf"}, {"id": "c6ce8eb37dafed09e1c55735fd1f1e9dc9c6bfe2", "title": "Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.07584.pdf"}, {"id": "c0006a2268d299644e9f1b455601bcbe89ddc2b5", "title": "Semantic Video Segmentation by Gated Recurrent Flow Propagation", "addresses": [{"address": "Lund University", "lat": "55.70395710", "lng": "13.19020110", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1612.08871.pdf"}, {"id": "273b9b7c63ac9196fb12734b49b74d0523ca4df4", "title": "The Secrets of Salient Object Segmentation", "addresses": [{"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}, {"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2014, "pdf": "https://arxiv.org/pdf/1406.2807v2.pdf"}, {"id": "e771661fa441f008c111ea786eb275153919da6e", "title": "Globally Optimal Object Tracking with Fully Convolutional Networks", "addresses": [{"address": "Kyushu University", "lat": "33.59914655", "lng": "130.22359848", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e771/661fa441f008c111ea786eb275153919da6e.pdf"}, {"id": "5feacd9dd73827fb438a6bf6c8b406f4f11aa2fa", "title": "Slanted Stixels: Representing San Francisco's Steepest Streets", "addresses": [{"address": "Universitat Aut\u00f2noma de Barcelona", "lat": "41.50078110", "lng": "2.11143663", "type": "edu"}, {"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5fea/cd9dd73827fb438a6bf6c8b406f4f11aa2fa.pdf"}]}
\ No newline at end of file +{"id": "0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a", "paper": {"paper_id": "0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a", "key": "voc", "title": "The Pascal Visual Object Classes (VOC) Challenge", "year": 2009, "pdf": "https://doi.org/10.1007/s11263-009-0275-4", "address": "", "name": "VOC"}, "address": null, "additional_papers": [], "citations": [{"id": "9f4078773c8ea3f37951bf617dbce1d4b3795839", "title": "Leveraging Inexpensive Supervision Signals for Visual Learning", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9f40/78773c8ea3f37951bf617dbce1d4b3795839.pdf"}, {"id": "0e2af97f07625cb3cf5e30f1c9d807124cbbc850", "title": "From Large Scale Image Categorization to Entry-Level Categories", "addresses": [{"address": "University of North Carolina at Chapel Hill", "lat": "35.91139710", "lng": "-79.05045290", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Ordonez_From_Large_Scale_2013_ICCV_paper.pdf"}, {"id": "131e9edbe4b0322a467b7e8c35f6b0c0ca750e21", "title": "Contextual Action Recognition with R*CNN", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2015, "pdf": "http://openaccess.thecvf.com/content_iccv_2015/papers/Gkioxari_Contextual_Action_Recognition_ICCV_2015_paper.pdf"}, {"id": "396aacab076a3607429f58ce442d5d57b5aaa794", "title": "Semantic Instance Annotation of Street Scenes by 3D to 2D Label Transfer", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1511.03240v2.pdf"}, {"id": "ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98", "title": "Fine-grained Activity Recognition with Holistic and Pose based Features", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2014", "pdf": "https://arxiv.org/pdf/1406.1881.pdf"}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.04224.pdf"}, {"id": "18fe63c013983bea53be7d559ef36a1f385ca6ea", "title": "Supervision Beyond Human Annotations for Learning Visual Representations", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/18fe/63c013983bea53be7d559ef36a1f385ca6ea.pdf"}, {"id": "663cca096b98c8f0444608b188e464028ee34368", "title": "CASENet: Deep Category-Aware Semantic Edge Detection", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1705.09759.pdf"}, {"id": "f9d171019bfeb71733fe36f7fae14f342ca9e51c", "title": "Hough Forests Revisited: An Approach to Multiple Instance Tracking from Multiple Cameras", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f9d1/71019bfeb71733fe36f7fae14f342ca9e51c.pdf"}, {"id": 
"f832fdf1fac092b4140bf81d38e6bc6af5c1ea65", "title": "Instance-Level Human Parsing via Part Grouping Network", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.00157.pdf"}, {"id": "41be021880a916305c82199ddc2298eb271f6590", "title": "Benchmarks for Image Classification and Other High-dimensional Pattern Recognition Problems", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05272.pdf"}, {"id": "fdcc1e66697a724bd2d0d2da368de04a7eaf9209", "title": "The Devil is in the Decoder", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}, {"address": "Google", "lat": "37.42199990", "lng": "-122.08405750", "type": "company"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1707.05847.pdf"}, {"id": "6e7a9779dee831658e973ee26ac8bfed2d6da033", "title": "Human Pose Estimation for Multiple Frames", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/6e7a/9779dee831658e973ee26ac8bfed2d6da033.pdf"}, {"id": "29b3be93a60bbc5fe842826030853f99753b08bd", "title": "Hierarchical Scene Annotation", "addresses": [{"address": "California Institute of Technology", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/6a94/c929f02e2ebd2477ca96d14334b311e9b829.pdf"}, {"id": "b88b83d2ffd30bf3bc3be3fb7492fd88f633b2fe", "title": "Subcategory-Aware Object Classification", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2013, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a827.pdf"}, {"id": "241b86d3c71d14b8cc6044a425b047a0724cfdc9", "title": "Following Gaze in Video", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2017, "pdf": "http://people.csail.mit.edu/recasens/docs/videogazefollow.pdf"}, {"id": "0d746111135c2e7f91443869003d05cde3044beb", "title": "Partial face detection for continuous authentication", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532908"}, {"id": "420c46d7cafcb841309f02ad04cf51cb1f190a48", "title": "Multi-Scale Context Aggregation by Dilated Convolutions", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/b2a5/e98409c7a6a5e005348c023ccad23f5cb5a9.pdf"}, {"id": "076fd6fd85b93858155a1c775f1897f83d52b4c2", "title": "Improving an Object Detector and Extracting Regions Using Superpixels", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2013, "pdf": "http://www.crcv.ucf.edu/papers/cvpr2013/CVPR13_final_guang.pdf"}, {"id": "ccd3dcbccae7d903608530bddf6381db8e723a7d", "title": "Unsupervised Domain Adaptation for Semantic Segmentation with GANs", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", 
"type": "edu"}, {"address": "GE Global Research Center", "lat": "42.82982480", "lng": "-73.87719385", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/ccd3/dcbccae7d903608530bddf6381db8e723a7d.pdf"}, {"id": "b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8", "title": "HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf"}, {"id": "08eaa845a72a2b78e08e58592d8785942fced649", "title": "What's in a Question: Using Visual Questions as a Form of Supervision", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1704.03895"}, {"id": "d0ac9913a3b1784f94446db2f1fb4cf3afda151f", "title": "Exploiting Multi-modal Curriculum in Noisy Web Data for Large-scale Concept Learning", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d0ac/9913a3b1784f94446db2f1fb4cf3afda151f.pdf"}, {"id": "3a3a4408432408b62e2dc22de7820a5a2f7bbe9e", "title": "No Spare Parts: Sharing Part Detectors for Image Categorization", "addresses": [{"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}, {"address": "Delft University of Technology", "lat": "51.99882735", "lng": "4.37396037", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1510.04908.pdf"}, {"id": "d4f1eb008eb80595bcfdac368e23ae9754e1e745", "title": "Unconstrained Face Detection and Open-Set Face Recognition Challenge", "addresses": [{"address": "University of Colorado, Colorado Springs", "lat": "38.89207560", "lng": "-104.79716389", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02337.pdf"}, {"id": "33ba256d59aefe27735a30b51caf0554e5e3a1df", "title": "Early Active Learning via Robust Representation and Structured Sparsity", "addresses": [{"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/33ba/256d59aefe27735a30b51caf0554e5e3a1df.pdf"}, {"id": "b7407b2ea67b8c82246f013f4966c4cac1507e60", "title": "Object Detection via End-to-End Integration of Aspect Ratio and Context Aware Part-based Models and Fully Convolutional Networks", "addresses": [{"address": "Jilin University", "lat": "22.05356500", "lng": "113.39913285", "type": "edu"}, {"address": "North Carolina State University", "lat": "35.77184965", "lng": "-78.67408695", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/b740/7b2ea67b8c82246f013f4966c4cac1507e60.pdf"}, {"id": "3af130e2fd41143d5fc49503830bbd7bafd01f8b", "title": "How Do We Evaluate the Quality of Computational Editing Systems?", "addresses": [{"address": "University of Wisconsin Madison", "lat": "43.07982815", "lng": "-89.43066425", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/db76/002794c12e5febc30510de58b54bb9344ea9.pdf"}, {"id": "732e4016225280b485c557a119ec50cffb8fee98", "title": "Are all training examples equally valuable?", "addresses": [{"address": "MIT", "lat": 
"42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/732e/4016225280b485c557a119ec50cffb8fee98.pdf"}, {"id": "6341274aca0c2977c3e1575378f4f2126aa9b050", "title": "A multi-scale cascade fully convolutional network face detector", "addresses": [{"address": "University of Southern California", "lat": "34.02241490", "lng": "-118.28634407", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1609.03536v1.pdf"}, {"id": "9bd9050c53d90dfa86cb22501812afe6fc897406", "title": "Fine-Grained and Layered Object Recognition", "addresses": [{"address": "Xi'an Jiaotong University", "lat": "34.24749490", "lng": "108.97898751", "type": "edu"}, {"address": "Kyoto University", "lat": "35.02749960", "lng": "135.78154513", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9bd9/050c53d90dfa86cb22501812afe6fc897406.pdf"}, {"id": "3e0a1884448bfd7f416c6a45dfcdfc9f2e617268", "title": "Understanding and Controlling User Linkability in Decentralized Learning", "addresses": [{"address": "Max Planck Institute for Informatics", "lat": "49.25795660", "lng": "7.04577417", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.05838.pdf"}, {"id": "108961c7366e36825ffed94ac9eab603e05b6bc6", "title": "Deep Visual-Semantic Alignments for Generating Image Descriptions", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/abs/1412.2306"}, {"id": "282cee05661a690aa525f21b47c6ee39fb26a7c2", "title": "Build a Robust Learning Feature Descriptor by Using a New Image Visualization Method for Indoor Scenario Recognition", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/282c/ee05661a690aa525f21b47c6ee39fb26a7c2.pdf"}, {"id": "121503705689f46546cade78ff62963574b4750b", "title": "We Don\u2019t Need No Bounding-Boxes: Training Object Class Detectors Using Only Human Verification", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1602.08405.pdf"}, {"id": "df0e280cae018cebd5b16ad701ad101265c369fa", "title": "Deep Attributes from Context-Aware Regional Neural Codes", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}, {"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/df0e/280cae018cebd5b16ad701ad101265c369fa.pdf"}, {"id": "30654fd93360a339e271d4b194b7f7463b2c5dac", "title": "COSTA: Co-Occurrence Statistics for Zero-Shot Classification", "addresses": [{"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}], "year": 2014, "pdf": "https://ivi.fnwi.uva.nl/isis/publications/2014/MensinkCVPR2014/MensinkCVPR2014.pdf"}, {"id": "02ae77f4c289426f18e83ce6e295d39538fb0fcc", "title": "Dependency Modeling for Information Fusion with Applications in Visual Recognition", "addresses": [{"address": "Hong Kong Baptist University", "lat": "22.38742010", "lng": "114.20822220", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/02ae/77f4c289426f18e83ce6e295d39538fb0fcc.pdf"}, {"id": "23a84a4a77b6662d553c9252331e6b7920053125", "title": "Latent Model Ensemble with Auto-localization", "addresses": 
[{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Missouri", "lat": "38.92676100", "lng": "-92.29193783", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ad28/34a42fccfe965f2039591e2ac26453273405.pdf"}, {"id": "b7c5f885114186284c51e863b58292583047a8b4", "title": "GAdaBoost: Accelerating Adaboost Feature Selection with Genetic Algorithms", "addresses": [{"address": "American University in Cairo", "lat": "30.04287695", "lng": "31.23664139", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b7c5/f885114186284c51e863b58292583047a8b4.pdf"}, {"id": "009678c2034cf4a9924a78d533d2ec81303a946e", "title": "Connecting Gaze, Scene, and Attention: Generalized Attention Estimation via Joint Modeling of Gaze and Scene Saliency", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.10437.pdf"}, {"id": "0ae80aa149764e91544bbe45b80bb50434e7bda9", "title": "Ambient Sound Provides Supervision for Visual Learning", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/714c/21c575d2c02a51f2dd5250164f1269be44ca.pdf"}, {"id": "16161051ee13dd3d836a39a280df822bf6442c84", "title": "Learning Efficient Object Detection Models with Knowledge Distillation", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Missouri", "lat": "38.92676100", "lng": "-92.29193783", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/4bd3/f187f3e09483b1f0f92150a4a77409691b0f.pdf"}, {"id": "d6b1b0e60e1764982ef95d4ade8fcaa10bfb156a", "title": "A Sketch-based Approach for Multimedia Retrieval", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d6b1/b0e60e1764982ef95d4ade8fcaa10bfb156a.pdf"}, {"id": "079e20d0d870a5bade46cc9b4338a3d637399654", "title": "Semantic Segmentation , Urban Navigation , and Research Directions", "addresses": [{"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/079e/20d0d870a5bade46cc9b4338a3d637399654.pdf"}, {"id": "9035e87ce49b67b751838c7346d36fe481260217", "title": "An Introduction to Random Forests for Multi-class Object Detection", "addresses": [{"address": "Katholieke Universiteit Leuven", "lat": "50.88306860", "lng": "4.70195030", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/9035/e87ce49b67b751838c7346d36fe481260217.pdf"}, {"id": "24065d385bae5579be07607a1f63eb79cebf8773", "title": "Incremental Learning of NCM Forests for Large-Scale Image Classification", "addresses": [{"address": "University of Bonn", "lat": "50.73381240", "lng": "7.10224650", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.467"}, {"id": "7fbf1885f27fb72d5e553c4a2147375f928465ee", "title": "Not All Pixels Are Equal: Difficulty-Aware Semantic Segmentation via Deep Layer Cascade", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Shenzhen Institutes of Advanced Technology", "lat": 
"22.59805605", "lng": "113.98533784", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.01344.pdf"}, {"id": "a89e1fc2681a9a399cc5008ea34b5ec3fe7ca845", "title": "Improving Fast Segmentation With Teacher-Student Learning", "addresses": [{"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.08476.pdf"}, {"id": "3434ba5677e5c98e82ee17a1f2d0ddef66d0b009", "title": "Interactive tracking and action retrieval to support human behavior analysis", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3434/ba5677e5c98e82ee17a1f2d0ddef66d0b009.pdf"}, {"id": "3676c29babe1563ee64a1149d2ae2f9f1369fe25", "title": "Visual saliency computation for image analysis", "addresses": [{"address": "Boston University", "lat": "42.35042530", "lng": "-71.10056114", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3676/c29babe1563ee64a1149d2ae2f9f1369fe25.pdf"}, {"id": "a60540a8407fd117fd8e6857d4728e661f53dcc8", "title": "Deep Domain Generalization via Conditional Invariant Adversarial Networks", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}, {"address": "University of Sydney", "lat": "-33.88890695", "lng": "151.18943366", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a605/40a8407fd117fd8e6857d4728e661f53dcc8.pdf"}, {"id": "4aa286914f17cd8cefa0320e41800a99c142a1cd", "title": "Leveraging Context to Support Automated Food Recognition in Restaurants", "addresses": [{"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}], "year": 2015, "pdf": "http://www.vbettadapura.com/egocentric/food/Food-Bettadapura15.pdf"}, {"id": "4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8", "title": "Efficient Metric Learning for Real-World Face Recognition", "addresses": [{"address": "Graz University of Technology", "lat": "47.05821000", "lng": "15.46019568", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/4ab1/0174a4f98f7e2da7cf6ccfeb9bc64c8e7da8.pdf"}, {"id": "e64fa00da02cc774559db5be88bc2862afbfd432", "title": "Histogram of Oriented Normal Vectors for Object Recognition with a Depth Sensor", "addresses": [{"address": "University of Missouri", "lat": "38.92676100", "lng": "-92.29193783", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/e64f/a00da02cc774559db5be88bc2862afbfd432.pdf"}, {"id": "0f0a5d8a7a087204026a6b67000887dbf5b6a20f", "title": "Generating objects going well with the surroundings", "addresses": [{"address": "Seoul National University", "lat": "37.26728000", "lng": "126.98411510", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.02925.pdf"}, {"id": "14a3194bb454f1f2e3fc1452045ac18c69959368", "title": "Fast Object Detection Using Multistage Particle Window Deformable Part Model", "addresses": [{"address": "National Chung Cheng University", "lat": "23.56306355", "lng": "120.47510531", "type": "edu"}], "year": 2014, "pdf": 
"http://www.cs.ccu.edu.tw/~wtchu/papers/2014ISM-chu.pdf"}, {"id": "187480101af3fb195993da1e2c17d917df24eb23", "title": "Unsupervised Visual Representation Learning by Context Prediction", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of California, Berkeley", "lat": "37.86871260", "lng": "-122.25586815", "type": "edu"}], "year": 2015, "pdf": "http://arxiv.org/pdf/1505.05192v2.pdf"}, {"id": "3b5787604b619c273bf98232b0bd3bce5d4a34ee", "title": "Learning Discriminative Hidden Structural Parts for Visual Tracking", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3b57/87604b619c273bf98232b0bd3bce5d4a34ee.pdf"}, {"id": "95f4b88d4b0a725d786b34558b60af47f5442230", "title": "Reconfigurable Processor for Deep Learning in Autonomous Vehicles", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/381e/7525bc8b9d47ae0343e471f5f1d5e6963bbe.pdf"}, {"id": "205e895e03969c96f3c482b0bd26308b16a12bd0", "title": "Image Captioning with an Intermediate Attributes Layer", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/205e/895e03969c96f3c482b0bd26308b16a12bd0.pdf"}, {"id": "6bb19408458dbae075be7f1612b969b565b4767a", "title": "Approximate Log-Hilbert-Schmidt Distances between Covariance Operators for Image Classification", "addresses": [{"address": "Dartmouth College", "lat": "43.70479270", "lng": "-72.29259090", "type": "edu"}], "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/papers/Minh_Approximate_Log-Hilbert-Schmidt_Distances_CVPR_2016_paper.pdf"}, {"id": "719969807953d7ea8bda0397b1aadbaa6e205718", "title": "Automatic Dataset Augmentation", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08201.pdf"}, {"id": "bfea962697e9b445b89a865b465ae967dd55b4fe", "title": "Efficient object detection via structured learning and local classifiers", "addresses": [{"address": "Oxford Brookes University", "lat": "51.75552050", "lng": "-1.22615970", "type": "edu"}], "year": "2013", "pdf": "https://pdfs.semanticscholar.org/bfea/962697e9b445b89a865b465ae967dd55b4fe.pdf"}, {"id": "20b038c50cc7148dfb364e2de51cde120c907c9f", "title": "Integrated perception with recurrent multi-task neural networks", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1606.01735.pdf"}, {"id": "0faeec0d1c51623a511adb779dabb1e721a6309b", "title": "Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions", "addresses": [{"address": "National University of Ireland Maynooth", "lat": "53.38469750", "lng": "-6.60039458", "type": "edu"}, {"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}, {"address": "Purdue University", "lat": "40.43197220", "lng": 
"-86.92389368", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}, {"address": "University of Michigan", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu"}, {"address": "University of Texas at Arlington", "lat": "32.72836830", "lng": "-97.11201835", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a075/782ea38167658fe28986755adddba7369b4f.pdf"}, {"id": "fdfd57d4721174eba288e501c0c120ad076cdca8", "title": "An Analysis of Action Recognition Datasets for Language and Vision Tasks", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.07129.pdf"}, {"id": "a2af07176a38fe844b0e2fdf4abae65472628b38", "title": "Dog breed classification via landmarks", "addresses": [{"address": "University of Delaware", "lat": "39.68103280", "lng": "-75.75401840", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7026060"}, {"id": "ef2e36daf429899bb48d80ce6804731c3f99bb85", "title": "Debnath, Banerjee, Namboodiri: Adapting Ransac-svm to Detect Outliers for Robust Classification", "addresses": [{"address": "Indian Institute of Technology Kanpur", "lat": "26.51318800", "lng": "80.23651945", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f7bd/b4df0fb5b3ff9fa0ebfe7c2a9ddc34c09a5c.pdf"}, {"id": "9d422e2c318ab63e6b49c83053757b4636f8308b", "title": "Object localization in ImageNet by looking out of the window", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2015", "pdf": "https://arxiv.org/pdf/1501.01181.pdf"}, {"id": "71f36c8e17a5c080fab31fce1ffea9551fc49e47", "title": "Predicting Failures of Vision Systems", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2014, "pdf": "http://openaccess.thecvf.com/content_cvpr_2014/papers/Zhang_Predicting_Failures_of_2014_CVPR_paper.pdf"}, {"id": "7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794", "title": "Markov Chain Monte Carlo for Automated Face Image Analysis", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s11263-016-0967-5"}, {"id": "26c58e24687ccbe9737e41837aab74e4a499d259", "title": "Codemaps - Segment, Classify and Search Objects Locally", "addresses": [{"address": "University of Amsterdam", "lat": "52.35536550", "lng": "4.95016440", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Li_Codemaps_-_Segment_2013_ICCV_paper.pdf"}, {"id": "1f9102f425f28552e477cf71af0846550f3f9ed9", "title": "Incremental Domain Adaptation of Deformable Part-based Models", "addresses": [{"address": "Universitat Aut\u00f2noma de Barcelona", "lat": "41.50078110", "lng": "2.11143663", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/63ba/ef71a57d9ea6764c2bc3907cb6596b7f294a.pdf"}, {"id": "50953b9a15aca6ef3351e613e7215abdcae1435e", "title": "Learning coarse-to-fine sparselets for efficient object detection and scene classification", "addresses": [{"address": "Northwestern Polytechnical University", "lat": "34.24691520", "lng": "108.91061982", "type": "edu"}], "year": 2015, "pdf": 
"http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1B_007_ext.pdf"}, {"id": "a63104ad235f98bc5ee0b44fefbcdb49e32c205a", "title": "Has My Algorithm Succeeded? An Evaluator for Human Pose Estimators", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, {"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a631/04ad235f98bc5ee0b44fefbcdb49e32c205a.pdf"}, {"id": "f6e00d6430cbbaa64789d826d093f7f3e323b082", "title": "Visual Object Recognition", "addresses": [{"address": "RWTH Aachen University", "lat": "50.77917030", "lng": "6.06728733", "type": "edu"}, {"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/5255/490925aa1e01ac0b9a55e93ec8c82efc07b7.pdf"}, {"id": "ae5b2b449f59ae0f46f6a31ed4826d98241c394c", "title": "Accurate real-time people counting for crowded environments", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ae5b/2b449f59ae0f46f6a31ed4826d98241c394c.pdf"}, {"id": "0b9c5bfb4d8349bb3f6ddd6fb612b7f9657c93f8", "title": "Inverting and Visualizing Features for Object Detection", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/0b9c/5bfb4d8349bb3f6ddd6fb612b7f9657c93f8.pdf"}, {"id": "2cd7821fcf5fae53a185624f7eeda007434ae037", "title": "Exploring the geo-dependence of human face appearance", "addresses": [{"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": 2014, "pdf": "http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf"}, {"id": "45e616093a92e5f1e61a7c6037d5f637aa8964af", "title": "Fine-grained evaluation on face detection in the wild", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf"}, {"id": "81825711c2aaa1b9d3ead1a300e71c4353a41382", "title": "End-to-end training of object class detectors for mean average precision", "addresses": [{"address": "University of Edinburgh", "lat": "55.94951105", "lng": "-3.19534913", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1607.03476.pdf"}, {"id": "488fff23542ff397cdb1ced64db2c96320afc560", "title": "Weakly supervised localization of novel objects using appearance transfer", "addresses": [{"address": "University of Manitoba", "lat": "49.80915360", "lng": "-97.13304179", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_106_ext.pdf"}, {"id": "02b1a5d4b113211198e9c66d51153eb63ca680e2", "title": "Scene Invariant Crowd Segmentation and Counting Using Scale-Normalized Histogram of Moving Gradients (HoMG)", "addresses": [{"address": "University of Waterloo", "lat": "43.47061295", "lng": "-80.54724732", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/02b1/a5d4b113211198e9c66d51153eb63ca680e2.pdf"}, {"id": "7d520f474f2fc59422d910b980f8485716ce0a3e", "title": "Designing Convolutional Neural Networks for Urban Scene Understanding", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}, {"address": "Carnegie Mellon University", 
"lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2128/4a9310a4b4c836b8dfb6af39c682b7348128.pdf"}, {"id": "50f6888d9a7edb746e43a78c682a57c7ef36765b", "title": "Near-duplicate keyframe retrieval by semi-supervised learning and nonrigid image matching", "addresses": [{"address": "Zhejiang University", "lat": "30.19331415", "lng": "120.11930822", "type": "edu"}, {"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2011, "pdf": "http://www.cse.cuhk.edu.hk/lyu/_media/paper/jkzhu_comccap.pdf?cache=cache&id=publications:journal2"}, {"id": "72a4390a6c3b2bc2c3e7d83fc1f99e65e6137573", "title": "Collective Activity Localization with Contextual Spatial Pyramid", "addresses": [{"address": "University of Tokyo", "lat": "35.90204480", "lng": "139.93622009", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/72a4/390a6c3b2bc2c3e7d83fc1f99e65e6137573.pdf"}, {"id": "d012f6d7fd45051adaff8da4ce0860de9f81d445", "title": "A Deep Multi-task Learning Approach to Skin Lesion Classification", "addresses": [{"address": "University of Rochester", "lat": "43.15769690", "lng": "-77.58829158", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1812.03527.pdf"}, {"id": "f678f31e7bb5eda34098b0fed608cfad5e372509", "title": "Discriminative Kernel Feature Extraction and Learning for Object Recognition and Detection", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/f678/f31e7bb5eda34098b0fed608cfad5e372509.pdf"}, {"id": "07ba3c796244f76b5e6914246fd83b66d3b65c34", "title": "Real-time Human Detection based on Personness Estimation", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/07ba/3c796244f76b5e6914246fd83b66d3b65c34.pdf"}, {"id": "fe5c43aa19da5cbbf5a42e4697659875f7389b91", "title": "Tracking People in Broadcast Sports", "addresses": [{"address": "ETH Z\u00fcrich", "lat": "47.37645340", "lng": "8.54770931", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/fe5c/43aa19da5cbbf5a42e4697659875f7389b91.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/final/yfcc_100m.csv b/site/datasets/final/yfcc_100m.csv new file mode 100644 index 00000000..daee2cf4 --- /dev/null +++ b/site/datasets/final/yfcc_100m.csv @@ -0,0 +1,69 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,YFCC100M,yfcc_100m,0.0,0.0,,,a6e695ddd07aad719001c0fc1129328452385949,main,,The New Data and New Challenges in Multimedia Research,2015 +1,YFCC100M,yfcc_100m,45.5039761,-73.5749687,McGill University,edu,7d0ff6d0621b3846e8543bc162fd0215d8adfaf0,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Iscen_Efficient_Large-Scale_Similarity_CVPR_2016_paper.pdf,Efficient Large-Scale Similarity Search Using Matrix Factorization,2016 +2,YFCC100M,yfcc_100m,42.3583961,-71.09567788,MIT,edu,8c192cd39f90eb8ff2969f8916ef8967607c5298,citation,http://pdfs.semanticscholar.org/9677/d2f6a994f598c1d631038d49401c5f707ee0.pdf,"See, Hear, and Read: Deep Aligned Representations",2017 +3,YFCC100M,yfcc_100m,47.5612651,7.5752961,University of Basel,edu,b7c8452ac9791563d9a739bd079b05e518b20aea,citation,http://pdfs.semanticscholar.org/b7c8/452ac9791563d9a739bd079b05e518b20aea.pdf,Web Video in Numbers - An Analysis of Web-Video Metadata,2017 +4,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,7060f6062ba1cbe9502eeaaf13779aa1664224bb,citation,http://cs.stanford.edu/groups/vision/pdf/hata2017cscw.pdf,A Glimpse Far into the Future: Understanding Long-term Crowd Worker Quality,2017 +5,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,258dda85eadcd2081d1e0131826aceac7f1e2415,citation,http://pdfs.semanticscholar.org/e62d/40940a2711c7adca2857110272fb34d70576.pdf,Supervision Beyond Manual Annotations for Learning Visual Representations,2016 +6,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,2577211aeaaa1f2245ddc379564813bee3d46c06,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Misra_Seeing_Through_the_CVPR_2016_paper.pdf,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,2016 +7,YFCC100M,yfcc_100m,47.6423318,-122.1369302,Microsoft,company,2577211aeaaa1f2245ddc379564813bee3d46c06,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Misra_Seeing_Through_the_CVPR_2016_paper.pdf,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,2016 +8,YFCC100M,yfcc_100m,47.6543238,-122.30800894,University of Washington,edu,405526dfc79de98f5bf3c97bf4aa9a287700f15d,citation,http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf,MegaFace: A Million Faces for Recognition at Scale,2015 +9,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,18fe63c013983bea53be7d559ef36a1f385ca6ea,citation,http://pdfs.semanticscholar.org/18fe/63c013983bea53be7d559ef36a1f385ca6ea.pdf,Supervision Beyond Human Annotations for Learning Visual Representations,2015 +10,YFCC100M,yfcc_100m,33.776033,-84.39884086,Georgia Institute of Technology,edu,629b1bdf4d96bb41f7d3fce5c7d5617515303b71,citation,http://pdfs.semanticscholar.org/629b/1bdf4d96bb41f7d3fce5c7d5617515303b71.pdf,Diving Deeper into IM2GPS,2016 +11,YFCC100M,yfcc_100m,47.6543238,-122.30800894,University of Washington,edu,96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale,2016 +12,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie 
Mellon University,edu,d0ac9913a3b1784f94446db2f1fb4cf3afda151f,citation,http://pdfs.semanticscholar.org/d0ac/9913a3b1784f94446db2f1fb4cf3afda151f.pdf,Exploiting Multi-modal Curriculum in Noisy Web Data for Large-scale Concept Learning,2016 +13,YFCC100M,yfcc_100m,40.72925325,-73.99625394,New York University,edu,18078e72bddefffc24a6e882790aca8531773bed,citation,https://arxiv.org/pdf/1601.02306v1.pdf,Sublinear scaling of country attractiveness observed from Flickr dataset,2015 +14,YFCC100M,yfcc_100m,42.3583961,-71.09567788,MIT,edu,9677d2f6a994f598c1d631038d49401c5f707ee0,citation,https://arxiv.org/pdf/1706.00932.pdf,"See, Hear, and Read: Deep Aligned Representations",2017 +15,YFCC100M,yfcc_100m,42.3583961,-71.09567788,MIT,edu,1b6f3139b1e59b90ab1aaf978359229b75985b49,citation,http://pdfs.semanticscholar.org/847e/39b52a63a55fb94fff7ade1f90a7c67e508b.pdf,Learning with a Wasserstein Loss,2015 +16,YFCC100M,yfcc_100m,33.5934539,130.3557837,Information Technologies Institute,edu,ea985e35b36f05156f82ac2025ad3fe8037be0cd,citation,http://pdfs.semanticscholar.org/ea98/5e35b36f05156f82ac2025ad3fe8037be0cd.pdf,CERTH/CEA LIST at MediaEval Placing Task 2015,2015 +17,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,518f3cb2c9f2481cdce7741c5a821c26378b75e9,citation,http://pdfs.semanticscholar.org/518f/3cb2c9f2481cdce7741c5a821c26378b75e9.pdf,The Unreasonable Effectiveness of Noisy Data for Fine-Grained Recognition,2016 +18,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,982ede05154c1afdcf6fc623ba45186a34f4b9f2,citation,https://doi.org/10.1109/TMM.2017.2659221,The Many Shades of Negativity,2017 +19,YFCC100M,yfcc_100m,-33.8809651,151.20107299,University of Technology Sydney,edu,982ede05154c1afdcf6fc623ba45186a34f4b9f2,citation,https://doi.org/10.1109/TMM.2017.2659221,The Many Shades of Negativity,2017 +20,YFCC100M,yfcc_100m,46.0658836,11.1159894,University of Trento,edu,982ede05154c1afdcf6fc623ba45186a34f4b9f2,citation,https://doi.org/10.1109/TMM.2017.2659221,The Many Shades of Negativity,2017 +21,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,5996001b797ab2a0f55d5355cb168f25bfe56bbd,citation,http://doi.acm.org/10.1145/2671188.2749398,Content-Based Video Search over 1 Million Videos with 1 Core in 1 Second,2015 +22,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,65c978a97f54cf255f01c6846d6c51b37c61f836,citation,http://pdfs.semanticscholar.org/65c9/78a97f54cf255f01c6846d6c51b37c61f836.pdf,A Glimpse Far into the Future: Understanding Long-term Crowd Worker Accuracy,2016 +23,YFCC100M,yfcc_100m,47.6543238,-122.30800894,University of Washington,edu,301486e8dad7a41a1a99fd6fba28ce153fe1e56e,citation,http://pdfs.semanticscholar.org/3014/86e8dad7a41a1a99fd6fba28ce153fe1e56e.pdf,Are Elephants Bigger than Butterflies? 
Reasoning about Sizes of Objects,2016 +24,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,01a903739564f575b81c87f7a9e2cb7b609f7ada,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Johnson_Image_Retrieval_Using_2015_CVPR_paper.pdf,Image retrieval using scene graphs,2015 +25,YFCC100M,yfcc_100m,31.30104395,121.50045497,Fudan University,edu,c5e37630d0672e4d44f7dee83ac2c1528be41c2e,citation,http://dl.acm.org/citation.cfm?id=3078973,Multi-task Deep Neural Network for Joint Face Recognition and Facial Attribute Prediction,2017 +26,YFCC100M,yfcc_100m,37.3936717,-122.0807262,Facebook,company,05818eddd8a35fed7f3041d591ef966f8e79bd9a,citation,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_003_ext.pdf,Web scale photo hash clustering on a single machine,2015 +27,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,22954dd92a795d7f381465d1b353bcc41901430d,citation,http://pdfs.semanticscholar.org/3b04/f759e9b3c21defe2227374a008bec67751e3.pdf,Learning Visual Storylines with Skipping Recurrent Neural Networks,2016 +28,YFCC100M,yfcc_100m,47.6423318,-122.1369302,Microsoft,company,9bbc952adb3e3c6091d45d800e806d3373a52bac,citation,https://pdfs.semanticscholar.org/9bbc/952adb3e3c6091d45d800e806d3373a52bac.pdf,Learning Visual Classifiers using Human-centric Annotations,2015 +29,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,2c761495cf3dd320e229586f80f868be12360d4e,citation,http://arxiv.org/abs/1707.02968,Revisiting Unreasonable Effectiveness of Data in Deep Learning Era,2017 +30,YFCC100M,yfcc_100m,32.87935255,-117.23110049,"University of California, San Diego",edu,a9be20954e9177d8b2bc39747acdea4f5496f394,citation,http://acsweb.ucsd.edu/~yuw176/report/cvpr_2016.pdf,Event-Specific Image Importance,2016 +31,YFCC100M,yfcc_100m,52.3553655,4.9501644,University of Amsterdam,edu,256f09fe3163564958381d7f3727b5c27c19144c,citation,http://doi.acm.org/10.1145/2733373.2806335,Image2Emoji: Zero-shot Emoji Prediction for Visual Media,2015 +32,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,891433740bf6d318782c468638722aebf8bef2f5,citation,http://pdfs.semanticscholar.org/8914/33740bf6d318782c468638722aebf8bef2f5.pdf,Multi-Frame Video Super-Resolution Using Convolutional Neural Networks,2016 +33,YFCC100M,yfcc_100m,47.6543238,-122.30800894,University of Washington,edu,85304f24f5a1800e66de20ad05e20c8c032b7d03,citation,http://pdfs.semanticscholar.org/8530/4f24f5a1800e66de20ad05e20c8c032b7d03.pdf,Understanding and Discovering Deliberate Self-harm Content in Social Media,2017 +34,YFCC100M,yfcc_100m,22.2081469,114.25964115,University of Hong Kong,edu,35ec869dd0637c933d35ab823202c13b9b5d9aad,citation,http://pdfs.semanticscholar.org/4498/06bcb0987db60a0f8647380f9c335078fb46.pdf,Effective Community Search for Large Attributed Graphs,2016 +35,YFCC100M,yfcc_100m,40.4319722,-86.92389368,Purdue University,edu,7c5dde400571fd357d1093e1829a8bd7917d8fcd,citation,https://arxiv.org/pdf/1704.05982.pdf,Retrospective Higher-Order Markov Processes for User Trails,2017 +36,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,9ded64e83d3ba51513ea00de27c0c770a02b0cf4,citation,http://pdfs.semanticscholar.org/9ded/64e83d3ba51513ea00de27c0c770a02b0cf4.pdf,Image Classification using Transfer Learning from Siamese Networks based on Text Metadata Similarity,2016 +37,YFCC100M,yfcc_100m,1.2962018,103.77689944,National University of 
Singapore,edu,7d621ec871a03a01f5aa65253e9ae6c8aadaf798,citation,http://pdfs.semanticscholar.org/fa2a/0fd5c5d5d3f14bf3875d531372ba6957748d.pdf,Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades,2015 +38,YFCC100M,yfcc_100m,37.4585796,-122.17560525,SRI International,edu,33737f966cca541d5dbfb72906da2794c692b65b,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.238,Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video,2017 +39,YFCC100M,yfcc_100m,52.3553655,4.9501644,University of Amsterdam,edu,33737f966cca541d5dbfb72906da2794c692b65b,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.238,Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video,2017 +40,YFCC100M,yfcc_100m,42.3583961,-71.09567788,MIT,edu,988aa2583c63ada43ca260dd8b5a4a543725a483,citation,http://pdfs.semanticscholar.org/988a/a2583c63ada43ca260dd8b5a4a543725a483.pdf,Choosing the Right Home Location Definition Method for the Given Dataset,2015 +41,YFCC100M,yfcc_100m,32.9820799,-96.7566278,University of Texas at Dallas,edu,ac9516a589901f1421e8ce905dd8bc5b689317ca,citation,http://pdfs.semanticscholar.org/ac95/16a589901f1421e8ce905dd8bc5b689317ca.pdf,A Practical Framework for Executing Complex Queries over Encrypted Multimedia Data,2016 +42,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,d3008b4122e50a28f6cc1fa98ac6af28b42271ea,citation,http://dl.acm.org/citation.cfm?id=2806218,Searching Persuasively: Joint Event Detection and Evidence Recounting with Limited Supervision,2015 +43,YFCC100M,yfcc_100m,-33.8809651,151.20107299,University of Technology Sydney,edu,d3008b4122e50a28f6cc1fa98ac6af28b42271ea,citation,http://dl.acm.org/citation.cfm?id=2806218,Searching Persuasively: Joint Event Detection and Evidence Recounting with Limited Supervision,2015 +44,YFCC100M,yfcc_100m,38.0353682,-78.5035322,University of Virginia,edu,17e7a53456539dac2c9cf8631174c6388f64e24b,citation,https://arxiv.org/pdf/1612.01635.pdf,Learning to Detect Multiple Photographic Defects,2018 +45,YFCC100M,yfcc_100m,22.2081469,114.25964115,University of Hong Kong,edu,5d1ffb7ba3c53ecc5a90d40380ae235043c16344,citation,http://pdfs.semanticscholar.org/5d1f/fb7ba3c53ecc5a90d40380ae235043c16344.pdf,On Label-Aware Community Search,2016 +46,YFCC100M,yfcc_100m,35.9020448,139.93622009,University of Tokyo,edu,81f63e7344cc242416e37d791f7eb83ec2c07681,citation,https://arxiv.org/pdf/1804.06057.pdf,Multimodal Co-Training for Selecting Good Examples from Webly Labeled Video,2018 +47,YFCC100M,yfcc_100m,-37.8087465,144.9638875,RMIT University,edu,3ad6bd5c34b0866019b54f5976d644326069cb3d,citation,http://pdfs.semanticscholar.org/3ad6/bd5c34b0866019b54f5976d644326069cb3d.pdf,Towards Next Generation Touring: Personalized Group Tours,2016 +48,YFCC100M,yfcc_100m,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,02b852e698dfe85df39c24e7dd39dedf484893dd,citation,http://pdfs.semanticscholar.org/02b8/52e698dfe85df39c24e7dd39dedf484893dd.pdf,Collaborative Learning for Weakly Supervised Object Detection,2018 +49,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,187480101af3fb195993da1e2c17d917df24eb23,citation,http://arxiv.org/pdf/1505.05192v2.pdf,Unsupervised Visual Representation Learning by Context Prediction,2015 +50,YFCC100M,yfcc_100m,37.8687126,-122.25586815,"University of California, Berkeley",edu,187480101af3fb195993da1e2c17d917df24eb23,citation,http://arxiv.org/pdf/1505.05192v2.pdf,Unsupervised Visual Representation Learning by Context Prediction,2015 
+51,YFCC100M,yfcc_100m,31.846918,117.29053367,Hefei University of Technology,edu,beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd,citation,http://pdfs.semanticscholar.org/beea/df57a976f23f4fd6fa8a330eac6c81d3e3cd.pdf,ESGM : Event Enrichment and Summarization by Graph Model,2015 +52,YFCC100M,yfcc_100m,43.614386,7.071125,EURECOM,edu,beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd,citation,http://pdfs.semanticscholar.org/beea/df57a976f23f4fd6fa8a330eac6c81d3e3cd.pdf,ESGM : Event Enrichment and Summarization by Graph Model,2015 +53,YFCC100M,yfcc_100m,31.2284923,121.40211389,East China Normal University,edu,beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd,citation,http://pdfs.semanticscholar.org/beea/df57a976f23f4fd6fa8a330eac6c81d3e3cd.pdf,ESGM : Event Enrichment and Summarization by Graph Model,2015 +54,YFCC100M,yfcc_100m,38.2530945,140.8736593,Tohoku University,edu,171042ba12818238e3c0994ff08d71f8c28d4134,citation,http://pdfs.semanticscholar.org/1710/42ba12818238e3c0994ff08d71f8c28d4134.pdf,Learning to Describe E-Commerce Images from Noisy Online Data,2016 +55,YFCC100M,yfcc_100m,42.4505507,-76.4783513,Cornell University,edu,8a8861ad6caedc3993e31d46e7de6c251a8cda22,citation,https://arxiv.org/pdf/1706.01869.pdf,StreetStyle: Exploring world-wide clothing styles from millions of photos,2017 +56,YFCC100M,yfcc_100m,47.6423318,-122.1369302,Microsoft,company,19d1855e021561d6da9d0200bb18e47f51cddda6,citation,https://arxiv.org/pdf/1604.03968.pdf,Visual Storytelling,2016 +57,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,19d1855e021561d6da9d0200bb18e47f51cddda6,citation,https://arxiv.org/pdf/1604.03968.pdf,Visual Storytelling,2016 +58,YFCC100M,yfcc_100m,42.3583961,-71.09567788,MIT,edu,0ae80aa149764e91544bbe45b80bb50434e7bda9,citation,http://pdfs.semanticscholar.org/714c/21c575d2c02a51f2dd5250164f1269be44ca.pdf,Ambient Sound Provides Supervision for Visual Learning,2016 +59,YFCC100M,yfcc_100m,47.6423318,-122.1369302,Microsoft,company,30193451e552286645baa00db7dcd05780d9e1da,citation,https://pdfs.semanticscholar.org/3019/3451e552286645baa00db7dcd05780d9e1da.pdf,On Available Corpora for Empirical Methods in Vision & Language,2015 +60,YFCC100M,yfcc_100m,42.3504253,-71.10056114,Boston University,edu,16815ef660ef9e4091a81044d430591348df72ee,citation,http://pdfs.semanticscholar.org/1681/5ef660ef9e4091a81044d430591348df72ee.pdf,Combining Texture and Shape Cues for Object Recognition with Minimal Supervision,2016 +61,YFCC100M,yfcc_100m,37.4102193,-122.05965487,Carnegie Mellon University,edu,2a2fd2538e19652721bc664f92056fbd08c604fd,citation,http://pdfs.semanticscholar.org/5042/096e3a80b14a6686014f338e0643f5270e65.pdf,Surveillance Video Analysis with External Knowledge and Internal Constraints,2016 +62,YFCC100M,yfcc_100m,38.0333742,-84.5017758,University of Kentucky,edu,4576b59a44f75120f6a2d17a4e9c52e894297661,citation,https://pdfs.semanticscholar.org/4576/b59a44f75120f6a2d17a4e9c52e894297661.pdf,Learning Geo-Temporal Image Features,2018 +63,YFCC100M,yfcc_100m,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,4cfd770ccecae1c0b4248bc800d7fd35c817bbbd,citation,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf,A Discriminative Feature Learning Approach for Deep Face Recognition,2016 +64,YFCC100M,yfcc_100m,22.42031295,114.20788644,Chinese University of Hong Kong,edu,4cfd770ccecae1c0b4248bc800d7fd35c817bbbd,citation,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf,A Discriminative Feature Learning Approach for Deep Face 
Recognition,2016 +65,YFCC100M,yfcc_100m,33.5934539,130.3557837,Information Technologies Institute,edu,7f05df12dff3defee495507abd4870a0a30c3590,citation,http://pdfs.semanticscholar.org/7f05/df12dff3defee495507abd4870a0a30c3590.pdf,Placing Images with Refined Language Models and Similarity Search with PCA-reduced VGG Features,2016 +66,YFCC100M,yfcc_100m,39.65404635,-79.96475355,West Virginia University,edu,b7b421be7c1dcbb8d41edb11180ba6ec87511976,citation,https://arxiv.org/pdf/1805.00324.pdf,A Deep Face Identification Network Enhanced by Facial Attributes Prediction,2018 +67,YFCC100M,yfcc_100m,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,7fda1edac608bc67e55ac3d7c9dc5a542d8f8aee,citation,http://pdfs.semanticscholar.org/b742/8da870a9872ecdaa6feaaab43c0bcd136dd2.pdf,Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding,2016 diff --git a/site/datasets/final/youtube_makeup.json b/site/datasets/final/youtube_makeup.json index 4d6319f3..ec06d6ed 100644 --- a/site/datasets/final/youtube_makeup.json +++ b/site/datasets/final/youtube_makeup.json @@ -1 +1 @@ -{"id": "fcc6fe6007c322641796cb8792718641856a22a7", "paper": {"paper_id": "fcc6fe6007c322641796cb8792718641856a22a7", "key": "youtube_makeup", "title": "Automatic facial makeup detection with application in face recognition", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994", "address": {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, "name": "YMU"}, "address": {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, "additional_papers": [{"paper_id": "fcc6fe6007c322641796cb8792718641856a22a7", "key": "youtube_makeup", "title": "Automatic facial makeup detection with application in face recognition", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994", "address": {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, "name": "YMU"}], "citations": [{"id": "d61e794ec22a4d4882181da17316438b5b24890f", "title": "Detecting Sensor Level Spoof Attacks Using Joint Encoding of Temporal and Spatial Features", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/d61e/794ec22a4d4882181da17316438b5b24890f.pdf"}, {"id": "3be8f1f7501978287af8d7ebfac5963216698249", "title": "Deep Cascaded Regression for Face Alignment", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf"}, {"id": "329d58e8fb30f1bf09acb2f556c9c2f3e768b15c", "title": "Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf"}, {"id": "cf5c9b521c958b84bb63bea9d5cbb522845e4ba7", "title": "Towards Arbitrary-View Face Alignment by Recommendation Trees", "addresses": [{"address": "Shenzhen 
Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf"}, {"id": "159caaa56c2291bedbd41d12af5546a7725c58d4", "title": "A joint optimization scheme to combine different levels of features for face recognition with makeup changes", "addresses": [{"address": "Beijing Advanced Innovation Center for Imaging Technology", "lat": "39.92907420", "lng": "116.31093150", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532910"}, {"id": "cdf0dc4e06d56259f6c621741b1ada5c88963c6d", "title": "Makeup-insensitive face recognition by facial depth reconstruction and Gabor filter bank from women's real-world images", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Semnan University", "lat": "35.60374440", "lng": "53.43445877", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025061"}, {"id": "23e824d1dfc33f3780dd18076284f07bd99f1c43", "title": "Spoofing faces using makeup: An investigative study", "addresses": [{"address": "INRIA M\u00e9diterran\u00e9e", "lat": "43.61581310", "lng": "7.06838000", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686"}, {"id": "55bc7abcef8266d76667896bbc652d081d00f797", "title": "Impact of facial cosmetics on automatic gender and age estimation algorithms", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf"}, {"id": "1a53ca294bbe5923c46a339955e8207907e9c8c6", "title": "What Else Does Your Biometric Data Reveal? 
A Survey on Soft Biometrics", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870"}, {"id": "1922ad4978ab92ce0d23acc4c7441a8812f157e5", "title": "Face alignment by coarse-to-fine shape searching", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf"}, {"id": "0e05b365af662bc6744106a7cdf5e77c9900e967", "title": "Assessment of female facial beauty based on anthropometric, non-permanent and acquisition characteristics", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1007/s11042-014-2234-5"}, {"id": "72119cb98f9502ec639de317dccea57fd4b9ee55", "title": "A new approach for face recognition under makeup changes", "addresses": [{"address": "University of Delaware", "lat": "39.68103280", "lng": "-75.75401840", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/GlobalSIP.2015.7418230"}, {"id": "a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf", "title": "Face Authentication With Makeup Changes", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6587752"}, {"id": "8e63715d458ff79170a010c283c79427ce81ff0c", "title": "Demography-based facial retouching detection using subclass supervised sparse autoencoder", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.07598.pdf"}, {"id": "cd010fc089c580c87c5cff4aa6a9b1d6d41e2470", "title": "Digital Images Authentication Technique Based on DWT, DCT and Local Binary Patterns", "addresses": [{"address": "University of Kent", "lat": "51.29753440", "lng": "1.07296165", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cd01/0fc089c580c87c5cff4aa6a9b1d6d41e2470.pdf"}, {"id": "ac21c8aceea6b9495574f8f9d916e571e2fc497f", "title": "Pose-Independent Identity-based Facial Image Retrieval using Contextual Similarity", "addresses": [{"address": "King Abdullah University of Science and Technology, Saudi Arabia", "lat": "22.31055485", "lng": "39.10515486", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ac21/c8aceea6b9495574f8f9d916e571e2fc497f.pdf"}, {"id": "21bd9374c211749104232db33f0f71eab4df35d5", "title": "Integrating facial makeup detection into multimodal biometric user verification system", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/IWBF.2017.7935101"}, {"id": "e1179a5746b4bf12e1c8a033192326bf7f670a4d", "title": "Facial makeup detection technique based on texture and shape analysis", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}, {"address": "T\u00e9l\u00e9com ParisTech", "lat": "43.62716550", "lng": "7.04109170", 
"type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163104"}, {"id": "6c66ae815e7e508e852ecb122fb796abbcda16a8", "title": "Expression Recognition Databases and Methods", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/6c66/ae815e7e508e852ecb122fb796abbcda16a8.pdf"}, {"id": "06850b60e33baa4ea9473811d58c0d5015da079e", "title": "A Survey of the Trends in Facial and Expression Recognition Databases and Methods", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4cff/901521af06d6a0c98c9dce253296dd88b496.pdf"}, {"id": "b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23", "title": "An interactive virtual mirror to support makeup for visually impaired persons", "addresses": [{"address": "University of Tsukuba", "lat": "36.11120580", "lng": "140.10551760", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SMC.2017.8122808"}, {"id": "28a45770faf256f294ce3bbd5de25c6d5700976e", "title": "Accurate mouth state estimation via convolutional neural networks", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICDSP.2016.7868531"}, {"id": "23aba7b878544004b5dfa64f649697d9f082b0cf", "title": "Locality-constrained discriminative learning and coding", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301315"}, {"id": "e66b4aa85524f493dafde8c75176ac0afad5b79c", "title": "Watchlist risk assessment using multiparametric cost and relative entropy", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SSCI.2017.8285219"}, {"id": "fb6f5cb26395608a3cf0e9c6c618293a4278a8ad", "title": "Facial Image Attributes Transformation via Conditional Recycle Generative Adversarial Networks", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11390-018-1835-2"}, {"id": "e6d46d923f201da644ae8d8bd04721dd9ac0e73d", "title": "Robust transgender face recognition: Approach based on appearance and therapy factors", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}, {"address": "Norwegian Biometrics Laboratory, NTNU, Norway", "lat": "60.78973180", "lng": "10.68219270", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ISBA.2016.7477226"}, {"id": "965f8bb9a467ce9538dec6bef57438964976d6d9", "title": "Recognizing human faces under disguise and makeup", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2016, "pdf": "http://www4.comp.polyu.edu.hk/~csajaykr/myhome/papers/ISBA2016.pdf"}, {"id": "45edb29fb7eed5a52040300e1fd3cd53f1bdb429", "title": "Facial makeup detection via selected gradient orientation of entropy information", "addresses": [{"address": "National Chung Hsing 
University", "lat": "24.12084345", "lng": "120.67571165", "type": "edu"}, {"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "Academia Sinica, Taiwan", "lat": "25.04117270", "lng": "121.61465180", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351570"}, {"id": "272e487dfa32f241b622ac625f42eae783b7d9aa", "title": "Face recognition via semi-supervised discriminant local analysis", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICSIPA.2015.7412207"}, {"id": "0f64e26d6dd6f1c99fe2050887fac26cafe9ed60", "title": "Bridging the Gap Between Forensics and Biometric-Enabled Watchlists for e-Borders", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/MCI.2016.2627668"}]}
\ No newline at end of file +{"id": "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "paper": {"paper_id": "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "key": "youtube_makeup", "title": "Can facial cosmetics affect the matching accuracy of face recognition systems?", "year": 2012, "pdf": "http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf", "address": "", "name": "YMU"}, "address": {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, "additional_papers": [{"paper_id": "fcc6fe6007c322641796cb8792718641856a22a7", "key": "youtube_makeup", "title": "Automatic facial makeup detection with application in face recognition", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994", "address": {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, "name": "YMU"}], "citations": [{"id": "a32d4195f7752a715469ad99cb1e6ebc1a099de6", "title": "The Potential of Using Brain Images for Authentication", "addresses": [{"address": "National University of Defense Technology, China", "lat": "28.22902090", "lng": "112.99483204", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a32d/4195f7752a715469ad99cb1e6ebc1a099de6.pdf"}, {"id": "0647c9d56cf11215894d57d677997826b22f6a13", "title": "Transgender face recognition with off-the-shelf pre-trained CNNs: A comprehensive study", "addresses": [{"address": "Norwegian Biometrics Lab, NTNU, Norway", "lat": "60.78973180", "lng": "10.68219270", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557"}, {"id": "f0f501e1e8726148d18e70c8e9f6feea9360d119", "title": "Jukka Komulainen SOFTWARE - BASED COUNTERMEASURES TO 2 D FACIAL", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f0f5/01e1e8726148d18e70c8e9f6feea9360d119.pdf"}, {"id": "23e824d1dfc33f3780dd18076284f07bd99f1c43", "title": "Spoofing faces using makeup: An investigative study", "addresses": [{"address": "INRIA M\u00e9diterran\u00e9e", "lat": "43.61581310", "lng": "7.06838000", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686"}, {"id": "272e487dfa32f241b622ac625f42eae783b7d9aa", "title": "Face recognition via semi-supervised discriminant local analysis", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICSIPA.2015.7412207"}, {"id": "21bd9374c211749104232db33f0f71eab4df35d5", "title": "Integrating facial makeup detection into multimodal biometric user verification system", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/IWBF.2017.7935101"}, {"id": "0e05b365af662bc6744106a7cdf5e77c9900e967", "title": "Assessment of female facial beauty based on anthropometric, non-permanent and acquisition characteristics", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1007/s11042-014-2234-5"}, {"id": "a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf", "title": "Face Authentication With Makeup Changes", "addresses": [{"address": "National University of Singapore", "lat": 
"1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6587752"}, {"id": "55bc7abcef8266d76667896bbc652d081d00f797", "title": "Impact of facial cosmetics on automatic gender and age estimation algorithms", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf"}, {"id": "45edb29fb7eed5a52040300e1fd3cd53f1bdb429", "title": "Facial makeup detection via selected gradient orientation of entropy information", "addresses": [{"address": "National Chung Hsing University", "lat": "24.12084345", "lng": "120.67571165", "type": "edu"}, {"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "Academia Sinica, Taiwan", "lat": "25.04117270", "lng": "121.61465180", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351570"}, {"id": "72119cb98f9502ec639de317dccea57fd4b9ee55", "title": "A new approach for face recognition under makeup changes", "addresses": [{"address": "University of Delaware", "lat": "39.68103280", "lng": "-75.75401840", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/GlobalSIP.2015.7418230"}, {"id": "1f745215cda3a9f00a65166bd744e4ec35644b02", "title": "Facial cosmetics database and impact analysis on automatic face recognition", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2013, "pdf": "http://www.eurecom.fr/en/publication/4044/download/mm-publi-4044.pdf"}, {"id": "3a27d164e931c422d16481916a2fa6401b74bcef", "title": "Anti-Makeup: Learning A Bi-Level Adversarial Network for Makeup-Invariant Face Verification", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1709.03654.pdf"}, {"id": "69c2ac04693d53251500557316c854a625af84ee", "title": "50 years of biometric research: Accomplishments, challenges, and opportunities", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/dc97/ceb1faf945e780a92be651b022a82e3bff5a.pdf"}, {"id": "e1179a5746b4bf12e1c8a033192326bf7f670a4d", "title": "Facial makeup detection technique based on texture and shape analysis", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}, {"address": "T\u00e9l\u00e9com ParisTech", "lat": "43.62716550", "lng": "7.04109170", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163104"}, {"id": "8a0159919ee4e1a9f4cbfb652a1be212bf0554fd", "title": "Application of power laws to biometrics, forensics and network traffic analysis", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/8a01/59919ee4e1a9f4cbfb652a1be212bf0554fd.pdf"}, {"id": "fcc6fe6007c322641796cb8792718641856a22a7", "title": "Automatic facial makeup detection with application in face recognition", "addresses": [{"address": "West Virginia 
University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}, {"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994"}, {"id": "1a53ca294bbe5923c46a339955e8207907e9c8c6", "title": "What Else Does Your Biometric Data Reveal? A Survey on Soft Biometrics", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}, {"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870"}, {"id": "cdf0dc4e06d56259f6c621741b1ada5c88963c6d", "title": "Makeup-insensitive face recognition by facial depth reconstruction and Gabor filter bank from women's real-world images", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Semnan University", "lat": "35.60374440", "lng": "53.43445877", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025061"}, {"id": "407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0", "title": "Enhanced independent spectral histogram representations in face recognition", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}, {"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-5028-8"}, {"id": "cd010fc089c580c87c5cff4aa6a9b1d6d41e2470", "title": "Digital Images Authentication Technique Based on DWT, DCT and Local Binary Patterns", "addresses": [{"address": "University of Kent", "lat": "51.29753440", "lng": "1.07296165", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cd01/0fc089c580c87c5cff4aa6a9b1d6d41e2470.pdf"}, {"id": "5435d5f8b9f4def52ac84bee109320e64e58ab8f", "title": "Evaluating real-life performance of the state-of-the-art in facial expression recognition using a novel YouTube-based datasets", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}, {"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}, {"address": "SungKyunKwan University", "lat": "37.30031270", "lng": "126.97212300", "type": "edu"}], "year": "2016", "pdf": "http://doi.org/10.1007/s11042-016-4321-2"}, {"id": "965f8bb9a467ce9538dec6bef57438964976d6d9", "title": "Recognizing human faces under disguise and makeup", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2016, "pdf": "http://www4.comp.polyu.edu.hk/~csajaykr/myhome/papers/ISBA2016.pdf"}, {"id": "76d1c6c6b67e67ced1f19a89a5034dafc9599f25", "title": "Understanding OSN-based facial disclosure against face authentication systems", "addresses": [{"address": "Singapore Management University", "lat": "1.29500195", "lng": "103.84909214", "type": "edu"}], "year": 2014, "pdf": "http://doi.acm.org/10.1145/2590296.2590315"}, {"id": "d61e794ec22a4d4882181da17316438b5b24890f", "title": "Detecting Sensor Level Spoof Attacks Using Joint Encoding of Temporal and Spatial Features", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": "", "pdf": "http://pdfs.semanticscholar.org/d61e/794ec22a4d4882181da17316438b5b24890f.pdf"}, {"id": 
"3be8f1f7501978287af8d7ebfac5963216698249", "title": "Deep Cascaded Regression for Face Alignment", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf"}, {"id": "329d58e8fb30f1bf09acb2f556c9c2f3e768b15c", "title": "Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment", "addresses": [{"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, {"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf"}, {"id": "cf5c9b521c958b84bb63bea9d5cbb522845e4ba7", "title": "Towards Arbitrary-View Face Alignment by Recommendation Trees", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf"}, {"id": "159caaa56c2291bedbd41d12af5546a7725c58d4", "title": "A joint optimization scheme to combine different levels of features for face recognition with makeup changes", "addresses": [{"address": "Beijing Advanced Innovation Center for Imaging Technology", "lat": "39.92907420", "lng": "116.31093150", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532910"}, {"id": "1922ad4978ab92ce0d23acc4c7441a8812f157e5", "title": "Face alignment by coarse-to-fine shape searching", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2015, "pdf": "http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf"}, {"id": "8e63715d458ff79170a010c283c79427ce81ff0c", "title": "Demography-based facial retouching detection using subclass supervised sparse autoencoder", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.07598.pdf"}, {"id": "ac21c8aceea6b9495574f8f9d916e571e2fc497f", "title": "Pose-Independent Identity-based Facial Image Retrieval using Contextual Similarity", "addresses": [{"address": "King Abdullah University of Science and Technology, Saudi Arabia", "lat": "22.31055485", "lng": "39.10515486", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ac21/c8aceea6b9495574f8f9d916e571e2fc497f.pdf"}, {"id": "6c66ae815e7e508e852ecb122fb796abbcda16a8", "title": "Expression Recognition Databases and Methods", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/6c66/ae815e7e508e852ecb122fb796abbcda16a8.pdf"}, {"id": "06850b60e33baa4ea9473811d58c0d5015da079e", "title": "A Survey of the Trends in Facial and Expression Recognition Databases and Methods", "addresses": 
[{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4cff/901521af06d6a0c98c9dce253296dd88b496.pdf"}, {"id": "b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23", "title": "An interactive virtual mirror to support makeup for visually impaired persons", "addresses": [{"address": "University of Tsukuba", "lat": "36.11120580", "lng": "140.10551760", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SMC.2017.8122808"}, {"id": "28a45770faf256f294ce3bbd5de25c6d5700976e", "title": "Accurate mouth state estimation via convolutional neural networks", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICDSP.2016.7868531"}, {"id": "23aba7b878544004b5dfa64f649697d9f082b0cf", "title": "Locality-constrained discriminative learning and coding", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301315"}, {"id": "e66b4aa85524f493dafde8c75176ac0afad5b79c", "title": "Watchlist risk assessment using multiparametric cost and relative entropy", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SSCI.2017.8285219"}, {"id": "fb6f5cb26395608a3cf0e9c6c618293a4278a8ad", "title": "Facial Image Attributes Transformation via Conditional Recycle Generative Adversarial Networks", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "http://doi.org/10.1007/s11390-018-1835-2"}, {"id": "e6d46d923f201da644ae8d8bd04721dd9ac0e73d", "title": "Robust transgender face recognition: Approach based on appearance and therapy factors", "addresses": [{"address": "International Institute of Information Technology", "lat": "17.44549570", "lng": "78.34854698", "type": "edu"}, {"address": "Norwegian Biometrics Laboratory, NTNU, Norway", "lat": "60.78973180", "lng": "10.68219270", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ISBA.2016.7477226"}, {"id": "0f64e26d6dd6f1c99fe2050887fac26cafe9ed60", "title": "Bridging the Gap Between Forensics and Biometric-Enabled Watchlists for e-Borders", "addresses": [{"address": "University of Calgary", "lat": "51.07840380", "lng": "-114.12870770", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/MCI.2016.2627668"}]}
\ No newline at end of file diff --git a/site/datasets/final/youtube_poses.csv b/site/datasets/final/youtube_poses.csv new file mode 100644 index 00000000..10205029 --- /dev/null +++ b/site/datasets/final/youtube_poses.csv @@ -0,0 +1,20 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,YouTube Pose,youtube_poses,0.0,0.0,,,1c2802c2199b6d15ecefe7ba0c39bfe44363de38,main,http://arxiv.org/pdf/1511.06676v1.pdf,Personalizing Human Video Pose Estimation,2016 +1,YouTube Pose,youtube_poses,50.7338124,7.1022465,University of Bonn,edu,267bd60e442d87c44eaae3290610138e63d663ab,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Iqbal_PoseTrack_Joint_Multi-Person_CVPR_2017_paper.pdf,PoseTrack: Joint Multi-person Pose Estimation and Tracking,2017 +2,YouTube Pose,youtube_poses,-34.9189226,138.60423668,University of Adelaide,edu,267bd60e442d87c44eaae3290610138e63d663ab,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Iqbal_PoseTrack_Joint_Multi-Person_CVPR_2017_paper.pdf,PoseTrack: Joint Multi-person Pose Estimation and Tracking,2017 +3,YouTube Pose,youtube_poses,17.4454957,78.34854698,International Institute of Information Technology,edu,185263189a30986e31566394680d6d16b0089772,citation,https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf,Efficient Annotation of Objects for Video Analysis,2018 +4,YouTube Pose,youtube_poses,52.17638955,0.14308882,University of Cambridge,edu,cd87fea30b68ad1c9ebcb71a224c53cde3516adb,citation,https://pdfs.semanticscholar.org/cd87/fea30b68ad1c9ebcb71a224c53cde3516adb.pdf,EXTRACTING THE X FACTOR IN HUMAN PARSING 3 Factored module Factored task Aggregation module Input Main task Shared features Silhouette Body parts The X Factor bottleneck layers bottleneck layers bottleneck layers Initial module bottleneck layers initial block,2018 +5,YouTube Pose,youtube_poses,51.49887085,-0.17560797,Imperial College London,edu,37aa876f5202d1db6919f0a0dd5a0f76508c02fb,citation,https://arxiv.org/pdf/1711.10872.pdf,Occlusion-Aware Hand Pose Estimation Using Hierarchical Mixture Density Network,2018 +6,YouTube Pose,youtube_poses,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,0ca2f48fad7f69fb415ecbb99945250cbf8f011c,citation,https://pdfs.semanticscholar.org/0ca2/f48fad7f69fb415ecbb99945250cbf8f011c.pdf,Outliers Cleaning in Dynamic Systems,2017 +7,YouTube Pose,youtube_poses,42.3383668,-71.08793524,Northeastern University,edu,0ca2f48fad7f69fb415ecbb99945250cbf8f011c,citation,https://pdfs.semanticscholar.org/0ca2/f48fad7f69fb415ecbb99945250cbf8f011c.pdf,Outliers Cleaning in Dynamic Systems,2017 +8,YouTube Pose,youtube_poses,37.43131385,-122.16936535,Stanford University,edu,815e77b8f2e8f17205e46162b3addd02b2ea8ff0,citation,http://pdfs.semanticscholar.org/815e/77b8f2e8f17205e46162b3addd02b2ea8ff0.pdf,Marker-less Pose Estimation,2017 +9,YouTube Pose,youtube_poses,39.9492344,-75.19198985,University of Pennsylvania,edu,bbd9b5e4d4761d923d21a060513e826bf5bfc620,citation,https://arxiv.org/pdf/1704.04793.pdf,Harvesting Multiple Views for Marker-Less 3D Human Pose Annotations,2017 +10,YouTube Pose,youtube_poses,43.65815275,-79.3790801,Ryerson University,edu,bbd9b5e4d4761d923d21a060513e826bf5bfc620,citation,https://arxiv.org/pdf/1704.04793.pdf,Harvesting Multiple Views for Marker-Less 3D Human Pose Annotations,2017 +11,YouTube Pose,youtube_poses,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,10d255fb0bb651b6e9cc69855a970c44f121f2c9,citation,https://arxiv.org/pdf/1710.06513.pdf,Learning Pose 
Grammar to Encode Human Body Configuration for 3D Pose Estimation,2018 +12,YouTube Pose,youtube_poses,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,3e682d368422ff31632760611039372a07eeabc6,citation,https://pdfs.semanticscholar.org/a254/e59f6fd1f8c51e3a5398c01cc1b45aebc66e.pdf,Articulated Multi-person Tracking in the Wild,2016 +13,YouTube Pose,youtube_poses,-35.2776999,149.118527,Australian National University,edu,ce2fd44a8c43642b76f219fe32291c1b2644cb73,citation,https://arxiv.org/pdf/1707.09240.pdf,Human Pose Forecasting via Deep Markov Models,2017 +14,YouTube Pose,youtube_poses,52.17638955,0.14308882,University of Cambridge,edu,4065d038ecbda579a0791aaf46fc62bbcba5b1f3,citation,http://pdfs.semanticscholar.org/4065/d038ecbda579a0791aaf46fc62bbcba5b1f3.pdf,Real-time Factored ConvNets: Extracting the X Factor in Human Parsing,2017 +15,YouTube Pose,youtube_poses,50.7338124,7.1022465,University of Bonn,edu,7a0cd36d02ad962f628d9d504d02a850e27d5bfb,citation,https://arxiv.org/pdf/1710.10000.pdf,PoseTrack: A Benchmark for Human Pose Estimation and Tracking,2017 +16,YouTube Pose,youtube_poses,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,8e74244e220a1c9e89417caa1ad22f649884d311,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.142,ArtTrack: Articulated Multi-Person Tracking in the Wild,2017 +17,YouTube Pose,youtube_poses,65.0592157,25.46632601,University of Oulu,edu,a287643d3eddca3dcc09b3532f2b070a28d4a022,citation,http://pdfs.semanticscholar.org/a287/643d3eddca3dcc09b3532f2b070a28d4a022.pdf,Real-time Human Pose Estimation from Video with Convolutional Neural Networks,2016 +18,YouTube Pose,youtube_poses,60.18558755,24.8242733,Aalto University,edu,a287643d3eddca3dcc09b3532f2b070a28d4a022,citation,http://pdfs.semanticscholar.org/a287/643d3eddca3dcc09b3532f2b070a28d4a022.pdf,Real-time Human Pose Estimation from Video with Convolutional Neural Networks,2016 diff --git a/site/datasets/unknown/adience.json b/site/datasets/unknown/adience.json new file mode 100644 index 00000000..f3d667ef --- /dev/null +++ b/site/datasets/unknown/adience.json @@ -0,0 +1 @@ +{"id": "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "paper": {"paper_id": "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "key": "adience", "title": "Age and Gender Estimation of Unfiltered Faces", "year": 2014, "pdf": "http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf", "address": "", "name": "Adience"}, "citations": [{"id": "12e4545d07e1793df87520f384b37a015815d2f7", "title": "Age invariant face recognition: a survey on facial aging databases, techniques and effect of aging", "year": "2018", "pdf": "http://doi.org/10.1007/s10462-018-9661-z"}, {"id": "d9c0310203179d5328c4f1475fa4d68c5f0c7324", "title": "Face Analysis in the Wild", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11"}, {"id": "be72b20247fb4dc4072d962ced77ed89aa40372f", "title": "Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07718.pdf"}, {"id": "8879083463a471898ff9ed9403b84db277be5bf6", "title": "Regression Facial Attribute Classification via simultaneous dictionary learning", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2016.08.031"}, {"id": "633c851ebf625ad7abdda2324e9de093cf623141", "title": "Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database", "year": "2017", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727"}, {"id": "984edce0b961418d81203ec477b9bfa5a8197ba3", "title": "Customer and target individual face analysis for retail analytics", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369732"}, {"id": "ec0104286c96707f57df26b4f0a4f49b774c486b", "title": "An Ensemble CNN2ELM for Age Estimation", "year": 2018, "pdf": "http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf"}, {"id": "7cee802e083c5e1731ee50e731f23c9b12da7d36", "title": "2^B3^C: 2 Box 3 Crop of Facial Image for Gender Classification with Convolutional Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7cee/802e083c5e1731ee50e731f23c9b12da7d36.pdf"}, {"id": "b8b9cef0938975c5b640b7ada4e3dea6c06d64e9", "title": "Metric-Promoted Siamese Network for Gender Classification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.119"}, {"id": "30457461333c8797457c18636732327e6dde1d04", "title": "Gender classification system for half face images using multi manifold discriminant analysis", "year": 2017, "pdf": null}, {"id": "0dccc881cb9b474186a01fd60eb3a3e061fa6546", "title": "Effective face frontalization in unconstrained images", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_104_ext.pdf"}, {"id": "c95d8b9bddd76b8c83c8745747e8a33feedf3941", "title": "Image Ordinal Classification and Understanding: Grid Dropout with Masking Label", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.02901.pdf"}, {"id": "cb522b2e16b11dde48203bef97131ddca3cdaebd", "title": "Fusion of Domain-Specific and Trainable Features for Gender Recognition From Face Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331979"}, {"id": "b07f9dfc904d317fa71c1efa9b466460abc0bee5", "title": "Deep learning for fusing multi-sensor person-borne IED data", "year": 2017, "pdf": null}, {"id": "07a1e6d26028b28185b7a3eee86752c240a24261", "title": "MODE: automated neural network model debugging via state differential analysis and input selection", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3236082"}, {"id": "c8adbe00b5661ab9b3726d01c6842c0d72c8d997", "title": "Deep Architectures for Face Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c8ad/be00b5661ab9b3726d01c6842c0d72c8d997.pdf"}, {"id": "635158d2da146e9de559d2742a2fa234e06b52db", "title": "Emotion Recognition in the Wild via Convolutional Neural Networks and Mapped Binary Patterns", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_emotions/LeviHassnerICMI15.pdf"}, {"id": "841c99e887eb262e397fdf5b0490a2ae6c82d6b5", "title": "Feature extraction for facial age estimation: A survey", "year": 2016, "pdf": null}, {"id": "e5563a0d6a2312c614834dc784b5cc7594362bff", "title": "Real-Time Demographic Profiling from Face Imagery with Fisher Vectors", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e556/3a0d6a2312c614834dc784b5cc7594362bff.pdf"}, {"id": "5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd", "title": "Supervised COSMOS Autoencoder: Learning Beyond the Euclidean Loss!", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06221.pdf"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for 
Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "50ff21e595e0ebe51ae808a2da3b7940549f4035", "title": "Age Group and Gender Estimation in the Wild With Deep RoR Architecture", "year": 2017, "pdf": "http://export.arxiv.org/pdf/1710.02985"}, {"id": "4f37f71517420c93c6841beb33ca0926354fa11d", "title": "A hybrid deep learning CNN-ELM for age and gender classification", "year": "2018", "pdf": "http://doi.org/10.1016/j.neucom.2017.08.062"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "65d705bbcc10f42683503b3599327c816265d951", "title": "Articulated Motion and Deformable Objects", "year": "2014", "pdf": "http://doi.org/10.1007/978-3-319-94544-6"}, {"id": "7587a09d924cab41822a07cd1a988068b74baabb", "title": "Image scoring: Patch based CNN model for small or medium dataset", "year": 2017, "pdf": null}, {"id": "f77c9bf5beec7c975584e8087aae8d679664a1eb", "title": "Local Deep Neural Networks for Age and Gender Classification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f77c/9bf5beec7c975584e8087aae8d679664a1eb.pdf"}, {"id": "be0a0e563445119b82d664d370e646e53e69a4c5", "title": "Age and gender classification from speech and face images by jointly fine-tuned deep neural networks", "year": 2017, "pdf": "https://doi.org/10.1016/j.eswa.2017.05.037"}, {"id": "e16831b6818a3ffec0785bac21911062ab04370e", "title": "HOG-assisted deep feature learning for pedestrian gender recognition", "year": "2018", "pdf": "http://doi.org/10.1016/j.jfranklin.2017.09.003"}, {"id": "03f3bde03f83c3ff4f346d761fde4ce031dd4c69", "title": "Deep Models Calibration with Bayesian Neural Networks", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/03f3/bde03f83c3ff4f346d761fde4ce031dd4c69.pdf"}, {"id": "af6e351d58dba0962d6eb1baf4c9a776eb73533f", "title": "How to Train Your Deep Neural Network with Dictionary Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/af6e/351d58dba0962d6eb1baf4c9a776eb73533f.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "772a30f1a7a3071e5ce6ad4b0dbddc67889f5873", "title": "FDAR-Net: Joint Convolutional Neural Networks for Face Detection and Attribute Recognition", "year": 2016, "pdf": null}, {"id": "e8b56ed34ece9b1739fff0df6af3b65390c468d3", "title": "Human injected by Botox age estimation based on active shape models, speed up robust features, and support vector machine", "year": 2016, "pdf": null}, {"id": "1fc88451a83f088ce028a0f715b9f9b600f4bd1c", "title": "Facial Attribute Recognition by Recurrent Learning With Visual Fixation.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29993966"}, {"id": "e295c1aa47422eb35123053038e62e9aa50a2e3a", "title": "ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389"}, {"id": "7aa32e0639e0750e9eee3ce16e51e9f94241ae88", "title": "Automatic gender recognition for “in the wild” facial images using convolutional neural networks", "year": 2017, "pdf": null}, {"id": "e8951cc76af80da43e3528fe6d984071f17f57e7", "title": "Online Cost Efficient Customer Recognition System for Retail Analytics", "year": 2017, 
"pdf": "https://doi.org/10.1109/WACVW.2017.9"}, {"id": "6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81", "title": "Structured Output SVM Prediction of Apparent Age, Gender and Smile from Deep Features", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.96"}, {"id": "29db16efc3b378c50511f743e5197a4c0b9e902f", "title": "Deeply Learned Rich Coding for Cross-Dataset Facial Age Estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401"}, {"id": "bc749f0e81eafe9e32d56336750782f45d82609d", "title": "Combination of Texture and Geometric Features for Age Estimation in Face Images", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/bc74/9f0e81eafe9e32d56336750782f45d82609d.pdf"}, {"id": "ecfb93de88394a244896bfe6ee7bf39fb250b820", "title": "Gender recognition from face images with deep learning", "year": 2017, "pdf": null}, {"id": "43836d69f00275ba2f3d135f0ca9cf88d1209a87", "title": "Effective hyperparameter optimization using Nelder-Mead method in deep learning", "year": 2017, "pdf": "https://doi.org/10.1186/s41074-017-0030-7"}, {"id": "9939498315777b40bed9150d8940fc1ac340e8ba", "title": "ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583"}, {"id": "0435a34e93b8dda459de49b499dd71dbb478dc18", "title": "VEGAC: Visual Saliency-based Age, Gender, and Facial Expression Classification Using Convolutional Neural Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0435/a34e93b8dda459de49b499dd71dbb478dc18.pdf"}, {"id": "7173871866fc7e555e9123d1d7133d20577054e8", "title": "Simultaneous Adversarial Training - Learn from Others Mistakes", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08108.pdf"}, {"id": "341ed69a6e5d7a89ff897c72c1456f50cfb23c96", "title": "DAGER: Deep Age, Gender and Emotion Recognition Using Convolutional Neural Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/cd7f/26c430363f90e530824446b3a4c85cfb94e5.pdf"}, {"id": "8355d095d3534ef511a9af68a3b2893339e3f96b", "title": "DEX: Deep EXpectation of Apparent Age from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390"}, {"id": "1135a818b756b057104e45d976546970ba84e612", "title": "Age, Gender, and Fine-Grained Ethnicity Prediction Using Convolutional Neural Networks for the East Asian Face Dataset", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.118"}, {"id": "5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c", "title": "Automatic age and gender classification using supervised appearance model", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/2e36/a706bbec0f1adb7484e5d7416c3e612f43a1.pdf"}, {"id": "7aa4c16a8e1481629f16167dea313fe9256abb42", "title": "Multi-task learning for face identification and attribute estimation", "year": 2017, "pdf": "https://doi.org/10.1109/ICASSP.2017.7952703"}, {"id": "0deea943ac4dc1be822c02f97d0c6c97e201ba8d", "title": "Age category estimation using matching convolutional neural network", "year": 2018, "pdf": null}, {"id": "305346d01298edeb5c6dc8b55679e8f60ba97efb", "title": "Fine-Grained Face Annotation Using Deep Multi-Task CNN", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3053/46d01298edeb5c6dc8b55679e8f60ba97efb.pdf"}, {"id": "d38b32d91d56b01c77ef4dd7d625ce5217c6950b", "title": "Unconstrained gender classification by multi-resolution LPQ and SIFT", "year": 2016, "pdf": null}, {"id": 
"4562ea84ebfc8d9864e943ed9e44d35997bbdf43", "title": "Small Sample Deep Learning for Newborn Gestational Age Estimation", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.19"}, {"id": "d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9", "title": "Gender estimation based on supervised HOG, Action Units and unsupervised CNN feature extraction", "year": 2017, "pdf": null}, {"id": "7361b900018f22e37499443643be1ff9d20edfd6", "title": "Predictive biometrics: a review and analysis of predicting personal characteristics from biometric data", "year": "2017", "pdf": "http://doi.org/10.1049/iet-bmt.2016.0169"}, {"id": "166186e551b75c9b5adcc9218f0727b73f5de899", "title": "Automatic Age and Gender Recognition in Human Face Image Dataset using Convolutional Neural Network System", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf"}, {"id": "9755554b13103df634f9b1ef50a147dd02eab02f", "title": "How Transferable Are CNN-Based Features for Age and Gender Classification?", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736925"}, {"id": "f4373f5631329f77d85182ec2df6730cbd4686a9", "title": "Recognizing Gender from Human Facial Regions using Genetic Algorithm", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f437/3f5631329f77d85182ec2df6730cbd4686a9.pdf"}, {"id": "1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12", "title": "D2C: Deep cumulatively and comparatively learning for human age estimation", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.01.007"}, {"id": "2b60fe300735ea7c63f91c1121e89ba66040b833", "title": "A study on face recognition techniques with age and gender classification", "year": 2017, "pdf": null}, {"id": "3a05415356bd574cad1a9f1be21214e428bbc81b", "title": "Multinomial Naive Bayes for real-time gender recognition", "year": 2016, "pdf": null}, {"id": "d278e020be85a1ccd90aa366b70c43884dd3f798", "title": "Learning From Less Data: Diversified Subset Selection and Active Learning in Image Classification Tasks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11191.pdf"}, {"id": "0a325d70cc381b136a8f4e471b406cda6d27668c", "title": "A flexible hierarchical approach for facial age estimation based on multiple features", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0a32/5d70cc381b136a8f4e471b406cda6d27668c.pdf"}, {"id": "10195a163ab6348eef37213a46f60a3d87f289c5", "title": "Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks", "year": 2016, "pdf": "https://doi.org/10.1007/s11263-016-0940-3"}, {"id": "775c15a5dfca426d53c634668e58dd5d3314ea89", "title": "Image Quality-aware Deep Networks Ensemble for Efficient Gender Recognition in the Wild", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/775c/15a5dfca426d53c634668e58dd5d3314ea89.pdf"}, {"id": "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "title": "Age and gender classification using convolutional neural networks", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf"}, {"id": "f0ba5c89094b15469f95fd2a05a46b68b8faf1ca", "title": "Recognizing images across age progressions: A comprehensive review", "year": 2015, "pdf": null}, {"id": "4522a7268facecf05769e90cae6555ac70c05cc8", "title": "Auxiliary Demographic Information Assisted Age Estimation With Cascaded Structure", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475"}, {"id": "81e628a23e434762b1208045919af48dceb6c4d2", "title": "Attend and Rectify: A Gated Attention 
Mechanism for Fine-Grained Recovery", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07320.pdf"}, {"id": "5c09d905f6d4f861624821bf9dfe2aae29137e9c", "title": "Women Also Snowboard: Overcoming Bias in Captioning Models", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.00517.pdf"}, {"id": "732e8d8f5717f8802426e1b9debc18a8361c1782", "title": "Unimodal Probability Distributions for Deep Ordinal Classification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/732e/8d8f5717f8802426e1b9debc18a8361c1782.pdf"}, {"id": "cca476114c48871d05537abb303061de5ab010d6", "title": "A compact deep convolutional neural network architecture for video based age and gender estimation", "year": 2016, "pdf": "https://doi.org/10.15439/2016F472"}, {"id": "0cfca73806f443188632266513bac6aaf6923fa8", "title": "Predictive Uncertainty in Large Scale Classification using Dropout - Stochastic Gradient Hamiltonian Monte Carlo", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.04756.pdf"}, {"id": "9215d36c501d6ee57d74c1eeb1475efd800d92d3", "title": "An optimization framework of video advertising: using deep learning algorithm based on global image information", "year": 2018, "pdf": null}, {"id": "c5fff7adc5084d69390918daf09e832ec191144b", "title": "Deep learning application based on embedded GPU", "year": 2017, "pdf": null}, {"id": "d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b", "title": "Gender recognition: Methods, datasets and results", "year": 2015, "pdf": "http://iplab.dmi.unict.it/sites/default/files/_11.pdf"}, {"id": "00a38ebce124879738b04ffc1536018e75399193", "title": "Convolutional neural network for age classification from smart-phone based ocular images", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272766"}, {"id": "e7b6887cd06d0c1aa4902335f7893d7640aef823", "title": "Modelling of Facial Aging and Kinship: A Survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf"}, {"id": "f4003cbbff3b3d008aa64c76fed163c10d9c68bd", "title": "Compass local binary patterns for gender recognition of facial photographs and sketches", "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2016.08.055"}, {"id": "c254b4c0f6d5a5a45680eb3742907ec93c3a222b", "title": "A Fusion-based Gender Recognition Method Using Facial Images", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.06451.pdf"}, {"id": "17de5a9ce09f4834629cd76b8526071a956c9c6d", "title": "Smart Parental Advisory: A Usage Control and Deep Learning-Based Framework for Dynamic Parental Control on Smart TV", "year": 2017, "pdf": "https://doi.org/10.1007/978-3-319-68063-7_8"}, {"id": "321db1059032b828b223ca30f3304257f0c41e4c", "title": "Comparative evaluation of age classification from facial images", "year": 2015, "pdf": "https://doi.org/10.1109/ICACCI.2015.7275951"}, {"id": "6c0ad77af4c0850bd01bb118e175ecc313476f27", "title": "Extended multi-spectral face recognition across two different age groups: an empirical study", "year": 2016, "pdf": "http://doi.acm.org/10.1145/3009977.3010026"}, {"id": "cfdc632adcb799dba14af6a8339ca761725abf0a", "title": "Probabilistic Formulations of Regression with Mixed Guidance", "year": "2016", "pdf": "https://arxiv.org/pdf/1804.01575.pdf"}, {"id": "1277b1b8b609a18b94e4907d76a117c9783a5373", "title": "VirtualIdentity: Privacy preserving user profiling", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ASONAM.2016.7752438"}, {"id": "b839bc95794dc65340b6e5fea098fa6e6ea5e430", "title": "Soft Biometrics in Online Social Networks: A Case Study on Twitter User 
Gender Recognition", "year": 2017, "pdf": "https://doi.org/10.1109/WACVW.2017.8"}, {"id": "63a4105adbe182e67d8fd324de5c84a6df444294", "title": "Gender classification by LPQ features from intensity and Monogenic images", "year": 2017, "pdf": null}, {"id": "42a5dc91852c8c14ed5f4c3b451c9dc98348bc02", "title": "A Data Augmentation Methodology to Improve Age Estimation Using Convolutional Neural Networks", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021"}, {"id": "fffefc1fb840da63e17428fd5de6e79feb726894", "title": "Fine-Grained Age Estimation in the wild with Attention LSTM Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10445.pdf"}, {"id": "c9c2de3628be7e249722b12911bebad84b567ce6", "title": "Age and gender recognition in the wild with deep attention", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.06.028"}, {"id": "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "title": "Age group classification in the wild with deep RoR architecture", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296549"}, {"id": "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "title": "Deep Learning for Biometrics : A Survey KALAIVANI SUNDARARAJAN", "year": "2018", "pdf": null}, {"id": "ab3fcd9d5fbd2d0ad48fba4005899cf13e08d07e", "title": "Evaluating Automated Facial Age Estimation Techniques for Digital Forensics", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424644"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/aflw.json b/site/datasets/unknown/aflw.json new file mode 100644 index 00000000..138182a2 --- /dev/null +++ b/site/datasets/unknown/aflw.json @@ -0,0 +1 @@ +{"id": "a74251efa970b92925b89eeef50a5e37d9281ad0", "paper": {"paper_id": "a74251efa970b92925b89eeef50a5e37d9281ad0", "key": "aflw", "title": "Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization", "year": 2011, "pdf": "http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf", "address": "", "name": "AFLW"}, "citations": [{"id": "31e57fa83ac60c03d884774d2b515813493977b9", "title": "Face alignment with cascaded semi-parametric deep greedy neural forests", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/31e5/7fa83ac60c03d884774d2b515813493977b9.pdf"}, {"id": "a422f2d0212f54807ff678f209293a27c7791ec5", "title": "Adaptive image downsampling preprocessor for artificial neural networks", "year": 2017, "pdf": null}, {"id": "e4754afaa15b1b53e70743880484b8d0736990ff", "title": "300 Faces In-The-Wild Challenge: database and results", "year": "2016", "pdf": "http://doi.org/10.1016/j.imavis.2016.01.002"}, {"id": "d9c0310203179d5328c4f1475fa4d68c5f0c7324", "title": "Face Analysis in the Wild", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11"}, {"id": "d1777d3ea950e6aac92dd359075701bc28ba1cb2", "title": "Dynamic Facial Analysis: From Bayesian Filtering to Recurrent Neural Network", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099650"}, {"id": "9d5db7427b44d83bf036ff4cff382c23c6c7b6d8", "title": "Video redaction: a survey and comparison of enabling technologies", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/f78d/6d092deaafee550fb58d6ea6e8b559325876.pdf"}, {"id": "38192a0f9261d9727b119e294a65f2e25f72d7e6", "title": "Facial feature point detection: A comprehensive survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3819/2a0f9261d9727b119e294a65f2e25f72d7e6.pdf"}, {"id": "8397956c7ad3bd24c6c6c0b38866e165367327c0", "title": "Social Relation Trait Discovery from Visual LifeLog Data with Facial Multi-Attribute Framework", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/8397/956c7ad3bd24c6c6c0b38866e165367327c0.pdf"}, {"id": "a4de780b8b333f073667c1f2dd7b3ae54a97f380", "title": "Research on Abnormal Behavior Detection of Online Examination Based on Image Information", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8530188"}, {"id": "f7dea4454c2de0b96ab5cf95008ce7144292e52a", "title": "Facial Landmark Detection: A Literature Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.05563.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b", "title": "Convolutional Point-set Representation: A Convolutional Bridge Between a Densely Annotated Image and 3D Face Alignment", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/84b7/e2138a3701432c33ea70a1297328cd814ab5.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "2f16459e2e24dc91b3b4cac7c6294387d4a0eacf", "title": "Fast Deep Convolutional Face Detection in the Wild Exploiting Hard Sample Mining", 
"year": 2018, "pdf": "http://pdfs.semanticscholar.org/2f16/459e2e24dc91b3b4cac7c6294387d4a0eacf.pdf"}, {"id": "c1e76c6b643b287f621135ee0c27a9c481a99054", "title": "Multi-point Regression Voting for Shape Model Matching", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c1e7/6c6b643b287f621135ee0c27a9c481a99054.pdf"}, {"id": "174930cac7174257515a189cd3ecfdd80ee7dd54", "title": "Multi-view Face Detection Using Deep Convolutional Neural Networks", "year": 2015, "pdf": "http://doi.acm.org/10.1145/2671188.2749408"}, {"id": "2aea27352406a2066ddae5fad6f3f13afdc90be9", "title": "Bottom-Up and Top-Down Reasoning with Hierarchical Rectified Gaussians", "year": 2016, "pdf": "http://arxiv.org/pdf/1507.05699v4.pdf"}, {"id": "ba397fe5d4f0beaa7370b88e9875dbba19aa7bfc", "title": "SmileNet: Registration-Free Smiling Face Detection In The Wild", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265396"}, {"id": "fce1c3f4948cf300694c18c3fcc5486cd060af13", "title": "Head Pose Estimation on Low-Quality Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373880"}, {"id": "5b90bf3ebad1583beebcae5f892db2add248bcad", "title": ", C . F . F . Costa Filho and M . G . F . Costa Evaluation of Haar Cascade Classifiers Designed for Face Detection", "year": "", "pdf": "http://pdfs.semanticscholar.org/5b90/bf3ebad1583beebcae5f892db2add248bcad.pdf"}, {"id": "8cc07ae9510854ec6e79190cc150f9f1fe98a238", "title": "Using Deep Learning to Challenge Safety Standard for Highly Autonomous Machines in Agriculture", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/8cc0/7ae9510854ec6e79190cc150f9f1fe98a238.pdf"}, {"id": "1606b1475e125bba1b2d87bcf1e33b06f42c5f0d", "title": "A convolutional neural network cascade for face detection", "year": 2015, "pdf": "http://users.eecs.northwestern.edu/~xsh835/CVPR2015_CasCNN.pdf"}, {"id": "830e5b1043227fe189b3f93619ef4c58868758a7", "title": "A survey on face detection in the wild: Past, present and future", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/830e/5b1043227fe189b3f93619ef4c58868758a7.pdf"}, {"id": "22f656d0f8426c84a33a267977f511f127bfd7f3", "title": "From Facial Expression Recognition to Interpersonal Relation Prediction", "year": 2017, "pdf": "http://arxiv.org/abs/1609.06426"}, {"id": "02567fd428a675ca91a0c6786f47f3e35881bcbd", "title": "Deep Label Distribution Learning With Label Ambiguity", "year": 2017, "pdf": "https://arxiv.org/pdf/1611.01731.pdf"}, {"id": "136f92989e982ecf795cb27d65b48464eaec9323", "title": "Joint face alignment and segmentation via deep multi-task learning", "year": 2018, "pdf": null}, {"id": "95ea564bd983129ddb5535a6741e72bb1162c779", "title": "Multi-Task Learning by Deep Collaboration and Application in Facial Landmark Detection", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00111.pdf"}, {"id": "b6bbaa26f19ced1ce357d5bce903d772d5a49102", "title": "Privileged Information-Based Conditional Structured Output Regression Forest for Facial Point Detection", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7005459"}, {"id": "9944c451b4a487940d3fd8819080fe16d627892d", "title": "Human face shape analysis under spherical harmonics illumination considering self occlusion", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612967"}, {"id": "a1f40bcfadbeee66f67ab0755dd3037c030a7450", "title": "Face Image Quality Assessment Based on Learning to Rank", "year": 2015, "pdf": "https://doi.org/10.1109/LSP.2014.2347419"}, {"id": 
"bd13f50b8997d0733169ceba39b6eb1bda3eb1aa", "title": "Occlusion Coherence: Detecting and Localizing Occluded Faces", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bd13/f50b8997d0733169ceba39b6eb1bda3eb1aa.pdf"}, {"id": "b3e521baceadee36ac22b6a06266e8abd6a701f7", "title": "Occlusion-Aware 3D Morphable Models and an Illumination Prior for Face Image Analysis", "year": "2018", "pdf": "http://doi.org/10.1007/s11263-018-1064-8"}, {"id": "78f2c8671d1a79c08c80ac857e89315197418472", "title": "Recurrent 3D-2D Dual Learning for Large-Pose Facial Landmark Detection", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237443"}, {"id": "bb070c019c0885232f114c7dca970d2afd9cd828", "title": "A Novel Landmark Detector System for Multi Resolution Frontal Faces", "year": 2014, "pdf": "https://doi.org/10.1109/DICTA.2014.7008089"}, {"id": "6eba25166fe461dc388805cc2452d49f5d1cdadd", "title": "ALBANIE, VEDALDI: LEARNING GRIMACES BY WATCHING TV 1 Learning Grimaces by Watching TV", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6eba/25166fe461dc388805cc2452d49f5d1cdadd.pdf"}, {"id": "02bd665196bd50c4ecf05d6852a4b9ba027cd9d0", "title": "Feature Selection with Annealing for Computer Vision and Big Data Learning", "year": 2016, "pdf": "http://arxiv.org/pdf/1310.2880v6.pdf"}, {"id": "a66d89357ada66d98d242c124e1e8d96ac9b37a0", "title": "Failure Detection for Facial Landmark Detectors", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/a66d/89357ada66d98d242c124e1e8d96ac9b37a0.pdf"}, {"id": "726b8aba2095eef076922351e9d3a724bb71cb51", "title": "3DFaceNet: Real-time Dense Face Reconstruction via Synthesizing Photo-realistic Face Images", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d06b/cb2d46342ee011e652990edf290a0876b502.pdf"}, {"id": "9cc8cf0c7d7fa7607659921b6ff657e17e135ecc", "title": "Detecting Masked Faces in the Wild with LLE-CNNs", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099536"}, {"id": "419fec1a76d9233dcaa8d2c98ea622d19f663261", "title": "Unsupervised learning of object frames by dense equivariant image labelling", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.02932.pdf"}, {"id": "527dda77a3864d88b35e017d542cb612f275a4ec", "title": "Facial 3D model registration under occlusions with sensiblepoints-based reinforced hypothesis refinement", "year": 2017, "pdf": "https://arxiv.org/pdf/1709.00531v1.pdf"}, {"id": "446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03", "title": "A Pose-Adaptive Constrained Local Model for Accurate Head Pose Tracking", "year": 2014, "pdf": "http://www.isir.upmc.fr/files/2014ACTI3172.pdf"}, {"id": "fd96432675911a702b8a4ce857b7c8619498bf9f", "title": "Improved Face Detection and Alignment using Cascade Deep Convolutional Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/fd96/432675911a702b8a4ce857b7c8619498bf9f.pdf"}, {"id": "c3a3f7758bccbead7c9713cb8517889ea6d04687", "title": "Funnel-structured cascade for multi-view face detection with alignment-awareness", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c3a3/f7758bccbead7c9713cb8517889ea6d04687.pdf"}, {"id": "dfaa547451aae219cd2ca7a761e6c16c1e1d0add", "title": "Representation Learning by Rotating Your Faces", "year": "2018", "pdf": "https://arxiv.org/pdf/1705.11136.pdf"}, {"id": "5fa6e4a23da0b39e4b35ac73a15d55cee8608736", "title": "RED-Net: A Recurrent Encoder\u2013Decoder Network for Video-Based Face Alignment", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.06066.pdf"}, {"id": "e8c6853135856515fc88fff7c55737a292b0a15b", 
"title": "BoxFlow: Unsupervised Face Detector Adaptation from Images to Videos", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.46"}, {"id": "cc31db984282bb70946f6881bab741aa841d3a7c", "title": "Learning Grimaces by Watching TV", "year": "2016", "pdf": "https://arxiv.org/pdf/1610.02255.pdf"}, {"id": "614a7c42aae8946c7ad4c36b53290860f6256441", "title": "Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks", "year": 2016, "pdf": "https://arxiv.org/pdf/1604.02878.pdf"}, {"id": "f070d739fb812d38571ec77490ccd8777e95ce7a", "title": "Hierarchical facial landmark localization via cascaded random binary patterns", "year": "2015", "pdf": "http://doi.org/10.1016/j.patcog.2014.09.007"}, {"id": "390f3d7cdf1ce127ecca65afa2e24c563e9db93b", "title": "Learning Deep Representation for Face Alignment with Auxiliary Attributes", "year": 2016, "pdf": "https://arxiv.org/pdf/1408.3967v2.pdf"}, {"id": "1885acea0d24e7b953485f78ec57b2f04e946eaf", "title": "Combining Local and Global Features for 3D Face Tracking", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.297"}, {"id": "1efaa128378f988965841eb3f49d1319a102dc36", "title": "Hierarchical binary CNNs for landmark localization with limited resources", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04803.pdf"}, {"id": "d689063294e217f1ec8b83fe4b60e706f1934787", "title": "Simultaneous Face Detection and Pose Estimation Using Convolutional Neural Network Cascade", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8458127"}, {"id": "969dd8bc1179c047523d257516ade5d831d701ad", "title": "A weakly supervised method for makeup-invariant face verification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/969d/d8bc1179c047523d257516ade5d831d701ad.pdf"}, {"id": "c13211a15abd3ca187ef36b9f816891f901ba788", "title": "A multi-channel projection haar features method for face detection", "year": 2017, "pdf": null}, {"id": "26ac607a101492bc86fd81a141311066cfe9e2b5", "title": "Sieving Regression Forest Votes for Facial Feature Detection in the Wild", "year": 2013, "pdf": "http://www.eecs.qmul.ac.uk/~hy300/papers/YangPatrasiccv2013.pdf"}, {"id": "1f8e44593eb335c2253d0f22f7f9dc1025af8c0d", "title": "Fine-Tuning Regression Forests Votes for Object Alignment in the Wild", "year": 2015, "pdf": "https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22607/Patras%20Fine-tuning%20regression%202014%20Accepted.pdf?sequence=1"}, {"id": "9ab963e473829739475b9e47514f454ab467a5af", "title": "A Fully End-to-End Cascaded CNN for Facial Landmark Detection", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.33"}, {"id": "faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b", "title": "Combining Data-driven and Model-driven Methods for Robust Facial Landmark Detection", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/faea/d8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b.pdf"}, {"id": "c5ea084531212284ce3f1ca86a6209f0001de9d1", "title": "Audio-visual speech processing for multimedia localisation", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/c5ea/084531212284ce3f1ca86a6209f0001de9d1.pdf"}, {"id": "28bac5bc6e8a15f704563c5cb723b7c71f5413fa", "title": "Analysis for Self-taught and Transfer Learning Based Approaches for Emotion Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8474199"}, {"id": "17c0d99171efc957b88c31a465c59485ab033234", "title": "To learn image super-resolution, use a GAN to learn how to do image 
degradation first", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11458.pdf"}, {"id": "622c84d79a9420ed6f3a78f29233d56b1e99cc21", "title": "Video attention prediction using gaze saliency", "year": 2016, "pdf": null}, {"id": "b0c512fcfb7bd6c500429cbda963e28850f2e948", "title": "A Fast and Accurate Unconstrained Face Detector", "year": "2016", "pdf": "https://arxiv.org/pdf/1408.1656.pdf"}, {"id": "b8378ab83bc165bc0e3692f2ce593dcc713df34a", "title": "A 3D Approach to Facial Landmarks: Detection, Refinement, and Tracking", "year": 2014, "pdf": "http://cmp.felk.cvut.cz/ftp/articles/cech/Cech-ICPR-2014.pdf"}, {"id": "91883dabc11245e393786d85941fb99a6248c1fb", "title": "Face alignment in-the-wild: A Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9188/3dabc11245e393786d85941fb99a6248c1fb.pdf"}, {"id": "a81da7746f4f58e7211e65f11e6520144f8c003d", "title": "Facial Landmark Localization in the Wild by Backbone-Branches Representation Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8499059"}, {"id": "5b2cfee6e81ef36507ebf3c305e84e9e0473575a", "title": "GoDP: Globally Optimized Dual Pathway deep network architecture for facial landmark localization in-the-wild", "year": "2018", "pdf": "https://arxiv.org/pdf/1704.02402.pdf"}, {"id": "8f772d9ce324b2ef5857d6e0b2a420bc93961196", "title": "Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01760.pdf"}, {"id": "e11bc0f7c73c04d38b7fb80bd1ca886495a4d43c", "title": "\u201cA Leopard Cannot Change Its Spots\u201d: Improving Face Recognition Using 3D-Based Caricatures", "year": "2019", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306"}, {"id": "31af1f2614823504d1d643d1b019c6f9d2150b15", "title": "Super-FAN: Integrated facial landmark localization and super-resolution of real-world low resolution faces in arbitrary poses with GANs", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.02765.pdf"}, {"id": "488676e61fcf7b79d83c25fb103c8d8a854d8987", "title": "Leveraging Convolutional Pose Machines for Fast and Accurate Head Pose Estimation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4886/76e61fcf7b79d83c25fb103c8d8a854d8987.pdf"}, {"id": "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1029-3"}, {"id": "6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd", "title": "Multi-Scale Fully Convolutional Network for Fast Face Detection", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/6f7d/06ced04ead3b9a5da86b37e7c27bfcedbbdd.pdf"}, {"id": "d06bcb2d46342ee011e652990edf290a0876b502", "title": "Photo-realistic Face Images Synthesis for Learning-based Fine-scale 3D Face Reconstruction", "year": "2017", "pdf": "http://arxiv.org/abs/1708.00980"}, {"id": "e5737ffc4e74374b0c799b65afdbf0304ff344cb", "title": "A literature survey on robust and efficient eye localization in real-life scenarios", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e573/7ffc4e74374b0c799b65afdbf0304ff344cb.pdf"}, {"id": "6ad107c08ac018bfc6ab31ec92c8a4b234f67d49", "title": "Supervision-by-Registration: An Unsupervised Approach to Improve the Precision of Facial Landmark Detectors", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.00966.pdf"}, {"id": "84a20d0a47c0d826b77f73075530d618ba7573d2", "title": "Look at Boundary: A Boundary-Aware Face Alignment Algorithm", 
"year": "2018", "pdf": "https://arxiv.org/pdf/1805.10483.pdf"}, {"id": "73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c", "title": "Unsupervised Learning of Object Landmarks by Factorized Spatial Embeddings", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.02193.pdf"}, {"id": "66490b5869822b31d32af7108eaff193fbdb37b0", "title": "Cascade Multi-View Hourglass Model for Robust 3D Face Alignment", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373857"}, {"id": "363e5a0e4cd857e98de72a726ad6f80cea9c50ab", "title": "Fast Landmark Localization With 3D Component Reconstruction and CNN for Cross-Pose Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.09580.pdf"}, {"id": "4ac3cd8b6c50f7a26f27eefc64855134932b39be", "title": "Robust Facial Landmark Detection via a Fully-Convolutional Local-Global Context Network", "year": "", "pdf": "https://pdfs.semanticscholar.org/4ac3/cd8b6c50f7a26f27eefc64855134932b39be.pdf"}, {"id": "04dcdb7cb0d3c462bdefdd05508edfcff5a6d315", "title": "Assisting the training of deep neural networks with applications to computer vision", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/04dc/db7cb0d3c462bdefdd05508edfcff5a6d315.pdf"}, {"id": "3352426a67eabe3516812cb66a77aeb8b4df4d1b", "title": "Joint Multi-view Face Alignment in the Wild", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06023.pdf"}, {"id": "daa4cfde41d37b2ab497458e331556d13dd14d0b", "title": "Multi-view Constrained Local Models for Large Head Angle Facial Tracking", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406477"}, {"id": "24e099e77ae7bae3df2bebdc0ee4e00acca71250", "title": "Robust Face Alignment Under Occlusion via Regional Predictive Power Estimation", "year": 2015, "pdf": "https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22467/Yang%20Robust%20Face%20Alignment%20Under%20Occlusion%20via%20Regional%20Predictive%20Power%20Estimation%202015%20Accepted.pdf?sequence=1"}, {"id": "7d73adcee255469aadc5e926066f71c93f51a1a5", "title": "Face alignment by deep convolutional network with adaptive learning rate", "year": 2016, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001283.pdf"}, {"id": "15aa6c457678e25f6bc0e818e5fc39e42dd8e533", "title": "Conditional Image Generation for Learning the Structure of Visual Objects", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.07823.pdf"}, {"id": "3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2", "title": "Disentangling 3 D Pose in A Dendritic CNN for Unconstrained 2 D Face Alignment", "year": "2018", "pdf": null}, {"id": "bdfcc45cfa495939789b73eec7e6e98a4d7e3f41", "title": "A Real-Time Face Detector Based on an End-to-End CNN", "year": 2017, "pdf": null}, {"id": "c3a53b308c7a75c66759cbfdf52359d9be4f552b", "title": "On Detecting Partially Occluded Faces with Pose Variations", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISPAN-FCST-ISCC.2017.16"}, {"id": "795ea140df2c3d29753f40ccc4952ef24f46576c", "title": "Multi-Task Learning by Deep Collaboration and Application in Facial Landmark Detection", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/795e/a140df2c3d29753f40ccc4952ef24f46576c.pdf"}, {"id": "ef4b5bcaad4c36d7baa7bc166bd1712634c7ad71", "title": "Towards Spatio-temporal Face Alignment in Unconstrained Conditions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef4b/5bcaad4c36d7baa7bc166bd1712634c7ad71.pdf"}, {"id": "244b57cc4a00076efd5f913cc2833138087e1258", "title": "Warped Convolutions: Efficient Invariance to 
Spatial Transformations", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/dfa8/d0afc548a8086902412fb0eae0fcf881ed8a.pdf"}, {"id": "113b06e70b7eead8ae7450bafe9c91656705024c", "title": "Face Alignment across Large Pose via MT-CNN Based 3D Shape Reconstruction", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373832"}, {"id": "cd4c047f4d4df7937aff8fc76f4bae7718004f40", "title": "Background modeling for generative image models", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cd4c/047f4d4df7937aff8fc76f4bae7718004f40.pdf"}, {"id": "853fc1794892175e2318f55785ca8e2ce6fd7537", "title": "FHEDN: A context modeling Feature Hierarchy Encoder-Decoder Network for face detection", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489507"}, {"id": "0f9dd79de75a3dce394846369f09c05ddf250e31", "title": "An accurate and real-time multi-view face detector using ORFs and doubly domain-partitioning classifier", "year": 2018, "pdf": null}, {"id": "9ea992f009492888c482d5f4006281eaa8b758e7", "title": "X2Face: A Network for Controlling Face Generation Using Images, Audio, and Pose Codes", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.10550.pdf"}, {"id": "31d51e48dbd9e7253eafe0719f3788adb564a971", "title": "Visual Phrases for Exemplar Face Detection", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410588"}, {"id": "b558be7e182809f5404ea0fcf8a1d1d9498dc01a", "title": "Bottom-up and top-down reasoning with convolutional latent-variable models", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/dc8a/57827ffbe7064979638cf909abf7fcf7fb8d.pdf"}, {"id": "2e3d081c8f0e10f138314c4d2c11064a981c1327", "title": "A Comprehensive Performance Evaluation of Deformable Face Tracking \u201cIn-the-Wild\u201d", "year": 2017, "pdf": "http://arxiv.org/pdf/1603.06015v1.pdf"}, {"id": "5c0d105cfcc78d689d948a2aa8d654cab4e545f2", "title": "A Cascaded Framework for Model-Based 3D Face Reconstruction", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462692"}, {"id": "3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e", "title": "Learning Local Responses of Facial Landmarks with Conditional Variational Auto-Encoder for Face Alignment", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.117"}, {"id": "3d62b2f9cef997fc37099305dabff356d39ed477", "title": "Joint Face Alignment and 3D Face Reconstruction with Application to Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3d62/b2f9cef997fc37099305dabff356d39ed477.pdf"}, {"id": "407a26fff7fac195b74de9fcb556005e8785a4e9", "title": "Nested Shallow CNN-Cascade for Face Detection in the Wild", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.29"}, {"id": "45e7ddd5248977ba8ec61be111db912a4387d62f", "title": "Adversarial Learning of Structure-Aware Fully Convolutional Networks for Landmark Localization", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00253.pdf"}, {"id": "7384c39a2d084c93566b98bc4d81532b5ad55892", "title": "A Comparative Study of Face Landmarking Techniques", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/d0a5/0940a1bf951adaf22bd1fc72ea861b606cdb.pdf"}, {"id": "878301453e3d5cb1a1f7828002ea00f59cbeab06", "title": "Faceness-Net: Face Detection through Deep Facial Part Responses", "year": "2018", "pdf": "https://arxiv.org/pdf/1701.08393.pdf"}, {"id": "e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5", "title": "Improving Facial Landmark Detection via a Super-Resolution Inception 
Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf"}, {"id": "07d95be4922670ef2f8b11997e0c00eb643f3fca", "title": "The First Facial Landmark Tracking in-the-Wild Challenge: Benchmark and Results", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.132"}, {"id": "29c5a44e01d1126505471b2ab46163d598c871c7", "title": "Improving Landmark Localization with Semi-Supervised Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01591.pdf"}, {"id": "1b0a071450c419138432c033f722027ec88846ea", "title": "Looking at faces in a vehicle: A deep CNN based approach and evaluation", "year": 2016, "pdf": "https://doi.org/10.1109/ITSC.2016.7795622"}, {"id": "86c053c162c08bc3fe093cc10398b9e64367a100", "title": "Cascade of forests for face alignment", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/86c0/53c162c08bc3fe093cc10398b9e64367a100.pdf"}, {"id": "22137ce9c01a8fdebf92ef35407a5a5d18730dde", "title": "Recognition of Faces from single and Multi-View Videos", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2213/7ce9c01a8fdebf92ef35407a5a5d18730dde.pdf"}, {"id": "3176ee88d1bb137d0b561ee63edf10876f805cf0", "title": "Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation", "year": "2016", "pdf": "https://arxiv.org/pdf/1511.07356.pdf"}, {"id": "8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a", "title": "Feature Selection with Annealing for Big Data Learning", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/8fda/2f6b85c7e34d3e23927e501a4b4f7fc15b2a.pdf"}, {"id": "ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c", "title": "Soft Biometrics: Globally Coherent Solutions for Hair Segmentation and Style Recognition Based on Hierarchical MRFs", "year": 2017, "pdf": "https://doi.org/10.1109/TIFS.2017.2680246"}, {"id": "01bef320b83ac4405b3fc5b1cff788c124109fb9", "title": "Translating Head Motion into Attention - Towards Processing of Student's Body-Language", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/49e4/37cc5b673c49b942e304607a0050dcc82dae.pdf"}, {"id": "74ba4ab407b90592ffdf884a20e10006d2223015", "title": "Partial Face Detection in the Mobile Domain", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/74ba/4ab407b90592ffdf884a20e10006d2223015.pdf"}, {"id": "15ebec3796a2e23d31c8c8ddf6d21555be6eadc6", "title": "Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.03193.pdf"}, {"id": "9207671d9e2b668c065e06d9f58f597601039e5e", "title": "Face Detection Using a 3D Model on Face Keypoints", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/9207/671d9e2b668c065e06d9f58f597601039e5e.pdf"}, {"id": "22c06284a908d8ad0994ad52119773a034eed7ee", "title": "Adaptive Visual Feedback Generation for Facial Expression Improvement with Multi-task Deep Neural Networks", "year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2967236"}, {"id": "0561bed18b6278434deae562d646e8adad72e75d", "title": "Low rank driven robust facial landmark regression", "year": 2015, "pdf": "https://doi.org/10.1016/j.neucom.2014.09.052"}, {"id": "2f5ae4d6cd240ec7bc3f8ada47030e8439125df2", "title": "Efficient Boosted Exemplar-Based Face Detection", "year": 2014, "pdf": "http://users.eecs.northwestern.edu/~xsh835/CVPR14_ExemplarFaceDetection.pdf"}, {"id": "a0fd85b3400c7b3e11122f44dc5870ae2de9009a", "title": "Learning Deep Representation for Face Alignment with Auxiliary Attributes", "year": "2016", "pdf": "https://arxiv.org/pdf/1408.3967.pdf"}, {"id": 
"df51dfe55912d30fc2f792561e9e0c2b43179089", "title": "Face Hallucination Using Linear Models of Coupled Sparse Support", "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2717181"}, {"id": "293ade202109c7f23637589a637bdaed06dc37c9", "title": "Material for : Adaptive Cascaded Regression", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/293a/de202109c7f23637589a637bdaed06dc37c9.pdf"}, {"id": "d42dbc995318e2936714c65c028700bfd3633049", "title": "Face fiducial detection by consensus of exemplars", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477592"}, {"id": "64e82b42e1c41250bdf9eb952686631287cfd410", "title": "Evaluating the Quality of Face Alignment without Ground Truth", "year": 2015, "pdf": "https://doi.org/10.1111/cgf.12760"}, {"id": "a5e5094a1e052fa44f539b0d62b54ef03c78bf6a", "title": "Detection without Recognition for Redaction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a5e5/094a1e052fa44f539b0d62b54ef03c78bf6a.pdf"}, {"id": "6f9026627fb31d4cfb08dbcc4ab852945dc42252", "title": "Joint Head Pose/Soft Label Estimation for Human Recognition <italic>In-The-Wild</italic>", "year": 2016, "pdf": null}, {"id": "5aafca76dbbbbaefd82f5f0265776afb5320dafe", "title": "Empirical analysis of cascade deformable models for multi-view face detection", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5aaf/ca76dbbbbaefd82f5f0265776afb5320dafe.pdf"}, {"id": "3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548", "title": "Usability of Pilot's Gaze in Aeronautic Cockpit for Safer Aircraft", "year": 2015, "pdf": "https://doi.org/10.1109/ITSC.2015.252"}, {"id": "12055b8f82d5411f9ad196b60698d76fbd07ac1e", "title": "Multiview Facial Landmark Localization in RGB-D Images via Hierarchical Regression With Binary Patterns", "year": 2014, "pdf": "https://zhzhanp.github.io/papers/TCSVT2014.pdf"}, {"id": "5a3da29970d0c3c75ef4cb372b336fc8b10381d7", "title": "CNN-based Real-time Dense Face Reconstruction with Inverse-rendered Photo-realistic Face Images.", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.00980.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/casia_webface.json b/site/datasets/unknown/casia_webface.json new file mode 100644 index 00000000..dd34b1cb --- /dev/null +++ b/site/datasets/unknown/casia_webface.json @@ -0,0 +1 @@ +{"id": "853bd61bc48a431b9b1c7cab10c603830c488e39", "paper": {"paper_id": "853bd61bc48a431b9b1c7cab10c603830c488e39", "key": "casia_webface", "title": "Learning Face Representation from Scratch", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf", "address": "", "name": "CASIA Webface"}, "citations": [{"id": "8699268ee81a7472a0807c1d3b1db0d0ab05f40d", "title": "Channel-Recurrent Autoencoding for Image Modeling", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8699/268ee81a7472a0807c1d3b1db0d0ab05f40d.pdf"}, {"id": "e4232e8fd566a7289ccb33f732c9093c9beb84a6", "title": "UHDB31: A Dataset for Better Understanding Face Recognition Across Pose and Illumination Variation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265511"}, {"id": "98c548a4be0d3b62971e75259d7514feab14f884", "title": "Deep generative-contrastive networks for facial expression recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/98c5/48a4be0d3b62971e75259d7514feab14f884.pdf"}, {"id": "f442a2f2749f921849e22f37e0480ac04a3c3fec", "title": "Critical Features for Face Recognition in Humans and Machines", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f442/a2f2749f921849e22f37e0480ac04a3c3fec.pdf"}, {"id": "60e2b9b2e0db3089237d0208f57b22a3aac932c1", "title": "Frankenstein: Learning Deep Face Representations Using Small Data", "year": "2017", "pdf": "https://arxiv.org/pdf/1603.06470.pdf"}, {"id": "b84ccf1c07c6d2061c8aadaca3dfc4e7d41cc1c9", "title": "Dynamic Feature Matching for Partial Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8467369"}, {"id": "6c40fc9df6588f7cb721537883167eede1b8d369", "title": "3D Face Reconstruction Based on Convolutional Neural Network", "year": 2017, "pdf": null}, {"id": "04221205249bdffd0f155ac68ac477613654aa42", "title": "Semantic facial scores and compact deep transferred descriptors for scalable face image retrieval", "year": "2018", "pdf": "http://doi.org/10.1016/j.neucom.2018.04.056"}, {"id": "02c7740af5540f23a2da23d1769e64a8042ec62e", "title": "Big Data : The Management", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/02c7/740af5540f23a2da23d1769e64a8042ec62e.pdf"}, {"id": "1f94734847c15fa1da68d4222973950d6b683c9e", "title": "Embedding Label Structures for Fine-Grained Feature Representation", "year": 2016, "pdf": "http://arxiv.org/abs/1512.02895"}, {"id": "8ea8cdee6f62751d87339f821d2b2a094ab4b260", "title": "Enabling Live Video Analytics with a Scalable and Privacy-Aware Framework", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3209659"}, {"id": "09f5033e1e91dae1f7f31cba2b65bbff1d5f8ca3", "title": "Face Recognition Based on Densely Connected Convolutional Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8499078"}, {"id": "be4faea0971ef74096ec9800750648b7601dda65", "title": "Feature Analysis of Unsupervised Learning for Multi-task Classification Using Convolutional Neural Network", "year": "2017", "pdf": "http://doi.org/10.1007/s11063-017-9724-1"}, {"id": "2b632f090c09435d089ff76220fd31fd314838ae", "title": "Early Adaptation of Deep Priors in Age Prediction from Face Images", "year": 2017, "pdf": 
"http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf"}, {"id": "dd8084b2878ca95d8f14bae73e1072922f0cc5da", "title": "Model Distillation with Knowledge Transfer in Face Classification, Alignment and Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.02929.pdf"}, {"id": "a2e0966f303f38b58b898d388d1c83e40b605262", "title": "ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125"}, {"id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "title": "IARPA Janus Benchmark-B Face Dataset", "year": 2017, "pdf": "http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf"}, {"id": "d44a93027208816b9e871101693b05adab576d89", "title": "On the Capacity of Face Representation", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.10433.pdf"}, {"id": "9ca542d744149f0efc8b8aac8289f5e38e6d200c", "title": "Gender and Smile Classification Using Deep Convolutional Neural Networks", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789587"}, {"id": "7b455cbb320684f78cd8f2443f14ecf5f50426db", "title": "A Fast and Robust Negative Mining Approach for Enrollment in Face Recognition Systems", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.33"}, {"id": "1b3e7caf4b456e3762a827aa623c3fb88ca0b1a0", "title": "Contrapositive Margin Softmax Loss for Face Verification", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3265679"}, {"id": "c607572fd2594ca83f732c9790fd590da9e69eb1", "title": "Comparative Evaluation of Deep Architectures for Face Recognition in Unconstrained Environment ( FRUE )", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c607/572fd2594ca83f732c9790fd590da9e69eb1.pdf"}, {"id": "011e6146995d5d63c852bd776f782cc6f6e11b7b", "title": "Fast Training of Triplet-Based Deep Binary Embedding Networks", "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhuang_Fast_Training_of_CVPR_2016_paper.pdf"}, {"id": "91d513af1f667f64c9afc55ea1f45b0be7ba08d4", "title": "Automatic Face Image Quality Prediction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/91d5/13af1f667f64c9afc55ea1f45b0be7ba08d4.pdf"}, {"id": "014b4335d055679bc680a6ceb6f1a264d8ce8a4a", "title": "Are You Sure You Want To Do That? Classification with Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.02652.pdf"}, {"id": "2ea78e128bec30fb1a623c55ad5d55bb99190bd2", "title": "Residual vs. Inception vs. 
Classical Networks for Low-Resolution Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2ea7/8e128bec30fb1a623c55ad5d55bb99190bd2.pdf"}, {"id": "1ef4aac0ebc34e76123f848c256840d89ff728d0", "title": "Rapid Synthesis of Massive Face Sets for Improved Face Recognition", "year": 2017, "pdf": "http://www.openu.ac.il/home/hassner/projects/augmented_faces/Masietal2017rapid.pdf"}, {"id": "d6bdc70d259b38bbeb3a78db064232b4b4acc88f", "title": "Video-Based Face Association and Identification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.27"}, {"id": "f2004fff215a17ac132310882610ddafe25ba153", "title": "Facial Expression Recognition via Deep Learning", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.124"}, {"id": "d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0", "title": "Minimalistic CNN-based ensemble model for gender prediction from face images", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d0eb/3fd1b1750242f3bb39ce9ac27fc8cc7c5af0.pdf"}, {"id": "635158d2da146e9de559d2742a2fa234e06b52db", "title": "Emotion Recognition in the Wild via Convolutional Neural Networks and Mapped Binary Patterns", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_emotions/LeviHassnerICMI15.pdf"}, {"id": "85860d38c66a5cf2e6ffd6475a3a2ba096ea2920", "title": "Celeb-500K: A Large Training Dataset for Face Recognition", "year": "2018", "pdf": "http://doi.org/10.1109/ICIP.2018.8451704"}, {"id": "eb3066de677f9f6131aab542d9d426aaf50ed2ce", "title": "Deep Transfer Network with 3D Morphable Models for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373860"}, {"id": "dc1105c7171a0922bfde7612aa069231720ee694", "title": "Face image retrieval based on shape and texture feature fusion", "year": "2017", "pdf": "http://doi.org/10.1007/s41095-017-0091-7"}, {"id": "9a42c519f0aaa68debbe9df00b090ca446d25bc4", "title": "Face Recognition via Centralized Coordinate Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf"}, {"id": "efd308393b573e5410455960fe551160e1525f49", "title": "Tracking Persons-of-Interest via Unsupervised Representation Adaptation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/efd3/08393b573e5410455960fe551160e1525f49.pdf"}, {"id": "6bb95a0f3668cd36407c85899b71c9fe44bf9573", "title": "Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf"}, {"id": "339937141ffb547af8e746718fbf2365cc1570c8", "title": "Facial Emotion Recognition in Real Time", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3399/37141ffb547af8e746718fbf2365cc1570c8.pdf"}, {"id": "b4ee64022cc3ccd14c7f9d4935c59b16456067d3", "title": "Unsupervised Cross-Domain Image Generation", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b4ee/64022cc3ccd14c7f9d4935c59b16456067d3.pdf"}, {"id": "e295c1aa47422eb35123053038e62e9aa50a2e3a", "title": "ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "1fc249ec69b3e23856b42a4e591c59ac60d77118", "title": "Evaluation of a 3D-aided pose invariant 2D face 
recognition system", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272729"}, {"id": "c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "0750a816858b601c0dbf4cfb68066ae7e788f05d", "title": "CosFace: Large Margin Cosine Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.09414.pdf"}, {"id": "cfd4004054399f3a5f536df71f9b9987f060f434", "title": "Person Recognition in Social Media Photos", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.03224.pdf"}, {"id": "b0c1615ebcad516b5a26d45be58068673e2ff217", "title": "How Image Degradations Affect Deep CNN-Based Face Recognition?", "year": "2016", "pdf": "https://arxiv.org/pdf/1608.05246.pdf"}, {"id": "3504907a2e3c81d78e9dfe71c93ac145b1318f9c", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": 2017, "pdf": "https://arxiv.org/pdf/1605.02686v3.pdf"}, {"id": "8da32ff9e3759dc236878ac240728b344555e4e9", "title": "Investigating Nuisance Factors in Face Recognition with DCNN Representation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014820"}, {"id": "e79bacc03152ea55343e6af97bcd17d8904cf5ef", "title": "Recursive Spatial Transformer (ReST) for Alignment-Free Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237669"}, {"id": "b12431e61172443c534ea523a4d7407e847b5c5b", "title": "Yu\u0308z Tan\u0131maya Dayal\u0131 Kis\u0327i Bazl\u0131 Test Otomasyonu", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b124/31e61172443c534ea523a4d7407e847b5c5b.pdf"}, {"id": "f20e0eefd007bc310d2a753ba526d33a8aba812c", "title": "Accurate and robust face recognition from RGB-D images with a deep learning approach", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/116e/c3a1a8225362a3e3e445df45036fae7cadc6.pdf"}, {"id": "4209783b0cab1f22341f0600eed4512155b1dee6", "title": "Accurate and Efficient Similarity Search for Large Scale Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00365.pdf"}, {"id": "6412d8bbcc01f595a2982d6141e4b93e7e982d0f", "title": "Deep Convolutional Neural Network Using Triplets of Faces, Deep Ensemble, and Score-Level Fusion for Face Recognition", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.89"}, {"id": "d31328b12eef33e7722b8e5505d0f9d9abe2ffd9", "title": "Deep Unsupervised Domain Adaptation for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866"}, {"id": "10e4172dd4f4a633f10762fc5d4755e61d52dc36", "title": "Learning Multifunctional Binary Codes for Both Category and Attribute Oriented Retrieval Tasks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100146"}, {"id": "0077cd8f97cafd2b389783858a6e4ab7887b0b6b", "title": "Face Image Reconstruction from Deep Templates", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b971/266b29fcecf1d5efe1c4dcdc2355cb188ab0.pdf"}, {"id": "2e0d56794379c436b2d1be63e71a215dd67eb2ca", "title": "Improving precision and recall of face recognition in SIPP with combination of modified mean search and LSH", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.03872.pdf"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": 
"https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "363e5a0e4cd857e98de72a726ad6f80cea9c50ab", "title": "Fast Landmark Localization With 3D Component Reconstruction and CNN for Cross-Pose Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.09580.pdf"}, {"id": "78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c", "title": "Improved Deep Metric Learning with Multi-class N-pair Loss Objective", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/78a1/1b7d2d7e1b19d92d2afd51bd3624eca86c3c.pdf"}, {"id": "4268ae436db79c4eee8bc06e9475caff3ff70d57", "title": "Five Principles for Crowd-Source Experiments in Face Recognition", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.146"}, {"id": "4aa18f3a1c85f7a09d3b0d6b28c0339199892d60", "title": "The Application of Neural Networks for Facial Landmarking on Mobile Devices", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4aa1/8f3a1c85f7a09d3b0d6b28c0339199892d60.pdf"}, {"id": "26fa4c87a2c9e21e9207cc4aee2b9890b1ad5a0d", "title": "A domain adaptation approach to improve speaker turn embedding using face representation", "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3136800"}, {"id": "ae5e92abd5929ee7f0a5aa1622aa094bac4fae29", "title": "RGB-D Face Recognition via Deep Complementary and Common Feature Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373805"}, {"id": "f294278e03868257bfce132b8cf189359ada915a", "title": "Boosting Face in Video Recognition via CNN Based Key Frame Extraction", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411214"}, {"id": "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1029-3"}, {"id": "98b2f21db344b8b9f7747feaf86f92558595990c", "title": "Semantically Decomposing the Latent Spaces of Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b9f0/29075a36f15202f0d213fe222dcf237fe65f.pdf"}, {"id": "d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c", "title": "Face Album: Towards automatic photo management based on person identity on mobile phones", "year": 2017, "pdf": "https://doi.org/10.1109/ICASSP.2017.7952713"}, {"id": "697b0b9630213ca08a1ae1d459fabc13325bdcbb", "title": "Learning to Invert Local Binary Patterns", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/697b/0b9630213ca08a1ae1d459fabc13325bdcbb.pdf"}, {"id": "38f1fac3ed0fd054e009515e7bbc72cdd4cf801a", "title": "Finding Person Relations in Image Data of the Internet Archive", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08246.pdf"}, {"id": "9e297343da13cf9ba0ad8b5b75c07723136f4885", "title": "Regularizing Face Net for Discrete-valued Pain Regression", "year": "2017", "pdf": null}, {"id": "e7906370eae8655fb69844ae1a3d986c9f37c902", "title": "Face recognition using Deep Learning", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/e790/6370eae8655fb69844ae1a3d986c9f37c902.pdf"}, {"id": "a3ab2a7d596626a25f680b7dc9710ea2d34a8cbb", "title": "Machine Learning in Radiology: Applications Beyond Image Interpretation.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29158061"}, {"id": "1e8eee51fd3bf7a9570d6ee6aa9a09454254689d", "title": "Face Search at Scale", "year": 2017, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangOttoJain_FaceSearchAtScale_TPAMI.pdf"}, {"id": "4801256b4ee39e71d5a9a1046c57e3ad4af6735a", "title": "Cascade 
Attention Networks For Group Emotion Recognition with Face, Body and Image Cues", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3264991"}, {"id": "97f3d35d3567cd3d973c4c435cdd6832461b7c3c", "title": "Unleash the Black Magic in Age: A Multi-Task Deep Neural Network Approach for Cross-Age Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.75"}, {"id": "b49425f78907fcc447d181eb713abffc74dd85e4", "title": "Sampling Matters in Deep Embedding Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.07567.pdf"}, {"id": "734083b72b707dd2293ef2791f01506dec9f8a99", "title": "Non-rigid registration based model-free 3D facial expression recognition", "year": "2017", "pdf": "http://doi.org/10.1016/j.cviu.2017.07.005"}, {"id": "1da5fc63d66fbf750b0e15c5ef6d4274ca73cca1", "title": "Research on face recognition method based on deep learning in natural environment", "year": 2017, "pdf": null}, {"id": "050a149051a5d268fcc5539e8b654c2240070c82", "title": "Magisterske\u0301 a doktorske\u0301 studijn\u0131\u0301 programy", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/050a/149051a5d268fcc5539e8b654c2240070c82.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "406c5aeca71011fd8f8bd233744a81b53ccf635a", "title": "Scalable softmax loss for face verification", "year": 2017, "pdf": null}, {"id": "15f70a0ad8903017250927595ae2096d8b263090", "title": "Learning Robust Deep Face Representation", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/15f7/0a0ad8903017250927595ae2096d8b263090.pdf"}, {"id": "1280b35e4a20036fcfd82ee09f45a3fca190276f", "title": "Face Verification Based on Feature Transfer via PCA-SVM Framework", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/iThings-GreenCom-CPSCom-SmartData.2017.166"}, {"id": "5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f", "title": "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7589/58f2340ba46c6708b73d5427985d5623a512.pdf"}, {"id": "93d11da02205bbc5ae68e521e421f70a4b74a7f7", "title": "Emotion recognition and reaction prediction in videos", "year": 2017, "pdf": null}, {"id": "dfaa547451aae219cd2ca7a761e6c16c1e1d0add", "title": "Representation Learning by Rotating Your Faces", "year": "2018", "pdf": "https://arxiv.org/pdf/1705.11136.pdf"}, {"id": "5905b4610389cd3b11a3a1ce06c05fee36a97f86", "title": "Unconstrained Face Recognition Using a Set-to-Set Distance Measure on Deep Learned Features", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936556"}, {"id": "e7144f5c19848e037bb96e225d1cfd961f82bd9f", "title": "Heterogeneous Face Recognition: Recent Advances in Infrared-to-Visible Matching", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.126"}, {"id": "ec39e9c21d6e2576f21936b1ecc1574dadaf291e", "title": "Pose-Robust Face Verification by Exploiting Competing Tasks", "year": 2017, "pdf": "https://doi.org/10.1109/WACV.2017.130"}, {"id": "48121f5937accc8050b0c9bf2be6d1c58b07a8a0", "title": "Multi-pose face recognition by dynamic loss weights", "year": 2017, "pdf": null}, {"id": "42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830", "title": "Coordinated Local Metric Learning", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Saxena_Coordinated_Local_Metric_ICCV_2015_paper.pdf"}, {"id": 
"193bc8b663d041bc34134a8407adc3e546daa9cc", "title": "A Quantitative Comparison of Methods for 3D Face Reconstruction from 2D Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373908"}, {"id": "c758b9c82b603904ba8806e6193c5fefa57e9613", "title": "Heterogeneous Face Recognition with CNNs", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c758/b9c82b603904ba8806e6193c5fefa57e9613.pdf"}, {"id": "fa641327dc5873276f0af453a2caa1634c16f143", "title": "ChaLearn Looking at People RGB-D Isolated and Continuous Datasets for Gesture Recognition", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789590"}, {"id": "3acb6b3e3f09f528c88d5dd765fee6131de931ea", "title": "Novel representation for driver emotion recognition in motor vehicle videos", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296393"}, {"id": "6afe1f668eea8dfdd43f0780634073ed4545af23", "title": "Deep learning for content-based video retrieval in film and television production", "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-4962-9"}, {"id": "0c0db39cac8cb76b52cfdbe10bde1c53d68d202f", "title": "Metric-based Generative Adversarial Network", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3123266.3123334"}, {"id": "dd65f71dac86e36eecbd3ed225d016c3336b4a13", "title": "Visual Kinship Recognition of Families in the Wild", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841"}, {"id": "a022eff5470c3446aca683eae9c18319fd2406d5", "title": "Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description s\u00e9mantique des traits visuels humains)", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf"}, {"id": "c2508af974b8b1fd4ef097ef625e8bfd07474be0", "title": "Correlation-Based Face Detection for Recognizing Faces in Videos", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461485"}, {"id": "d40c4e370d35264e324e4e3d5df59e51518c9979", "title": "A Transfer Learning based Feature-Weak-Relevant Method for Image Clustering", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04068.pdf"}, {"id": "11ad162b3165b4353df8d7b4153fb26d6a310d11", "title": "Recognizing Families In the Wild (RFIW): Data Challenge Workshop in conjunction with ACM MM 2017", "year": 2017, "pdf": null}, {"id": "7ce03597b703a3b6754d1adac5fbc98536994e8f", "title": "On the Intrinsic Dimensionality of Face Representation", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7ce0/3597b703a3b6754d1adac5fbc98536994e8f.pdf"}, {"id": "486a82f50835ea888fbc5c6babf3cf8e8b9807bc", "title": "Face Search at Scale: 80 Million Gallery", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/486a/82f50835ea888fbc5c6babf3cf8e8b9807bc.pdf"}, {"id": "2a48dc596c7a2f496169360e819b56a6c8d38e67", "title": "Fine Tuning Dual Streams Deep Network with Multi-scale Pyramid Decision for Heterogeneous Face Recognition", "year": "2018", "pdf": "http://doi.org/10.1007/s11063-018-9942-1"}, {"id": "84c5b45328dee855c4855a104ac9c0558cc8a328", "title": "Conformal Mapping of a 3D Face Representation onto a 2D Image for CNN Based Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411213"}, {"id": "380dd0ddd5d69adc52defc095570d1c22952f5cc", "title": "Improving Smiling Detection with Race and Gender Diversity", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf"}, {"id": 
"26ebe98753acec806b7281d085110c06d9cd1e16", "title": "Self-Error-Correcting Convolutional Neural Network for Learning with Noisy Labels", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.22"}, {"id": "0b82bf595e76898993ed4f4b2883c42720c0f277", "title": "Improving Face Recognition by Exploring Local Features with Visual Attention", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229"}, {"id": "0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e", "title": "Large Age-Gap face verification by feature injection in deep networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf"}, {"id": "a13a27e65c88b6cb4a414fd4f6bca780751a59db", "title": "Deep convolution neural network with stacks of multi-scale convolutional layer block using triplet of faces for face recognition in the wild", "year": 2016, "pdf": "https://doi.org/10.1109/SMC.2016.7844934"}, {"id": "27da432cf2b9129dce256e5bf7f2f18953eef5a5", "title": "Face Recognition in Low Quality Images: A Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11519.pdf"}, {"id": "f3850d8ec9779e8e15da9831ba23d4cdca1dd4ee", "title": "Machine learning assisted optimization of electrochemical properties for Ni-rich cathode materials", "year": "2018", "pdf": "http://doi.org/10.1038/s41598-018-34201-4"}, {"id": "44078d0daed8b13114cffb15b368acc467f96351", "title": "Triplet probabilistic embedding for face verification and clustering", "year": 2016, "pdf": "http://arxiv.org/pdf/1604.05417v1.pdf"}, {"id": "727d03100d4a8e12620acd7b1d1972bbee54f0e6", "title": "von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04264.pdf"}, {"id": "5fff61302adc65d554d5db3722b8a604e62a8377", "title": "Additive Margin Softmax for Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.05599.pdf"}, {"id": "e4aeaf1af68a40907fda752559e45dc7afc2de67", "title": "Exponential Discriminative Metric Embedding in Deep Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e4ae/af1af68a40907fda752559e45dc7afc2de67.pdf"}, {"id": "b5ca8d4f259f35c1f3edfd9f108ce29881e478b0", "title": "Disentangled Representation Learning GAN for Pose-Invariant Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099624"}, {"id": "ddf099f0e0631da4a6396a17829160301796151c", "title": "Learning Face Image Quality from Human Assessments", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/ddf0/99f0e0631da4a6396a17829160301796151c.pdf"}, {"id": "f29aae30c2cb4c73a3c814408ee5692e22176329", "title": "Pairwise Relational Networks using Local Appearance Features for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.06405.pdf"}, {"id": "d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d", "title": "Robust Face Recognition via Multimodal Deep Face Representation", "year": "2015", "pdf": "https://arxiv.org/pdf/1509.00244.pdf"}, {"id": "01e14d8ffd6767336d50c2b817a7b7744903e567", "title": "Deep Network Shrinkage Applied to Cross-Spectrum Face Recognition", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.128"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4", "title": "Noise-resistant network: a deep-learning method for 
face recognition under noise", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/62d1/a31b8acd2141d3a994f2d2ec7a3baf0e6dc4.pdf"}, {"id": "3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f", "title": "Enhancing convolutional neural networks for face recognition with occlusion maps and batch triplet loss", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.07923.pdf"}, {"id": "a3d8b5622c4b9af1f753aade57e4774730787a00", "title": "Pose-Aware Person Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.10120.pdf"}, {"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "84fe5b4ac805af63206012d29523a1e033bc827e", "title": "Ear recognition: More than a survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf"}, {"id": "459e840ec58ef5ffcee60f49a94424eb503e8982", "title": "One-shot Face Recognition by Promoting Underrepresented Classes", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/459e/840ec58ef5ffcee60f49a94424eb503e8982.pdf"}, {"id": "f3495bf7f7d827c72cc4e7a4850eaf54a998db11", "title": "Trends and Controversies", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423530"}, {"id": "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "title": "Git Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08512.pdf"}, {"id": "de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6", "title": "FPGA-accelerated deep convolutional neural networks for high throughput and energy efficiency", "year": 2017, "pdf": "https://doi.org/10.1002/cpe.3850"}, {"id": "48320c6c156e7e25bfc04171b5ee6003de356a11", "title": "AF-Softmax for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525505"}, {"id": "2c92839418a64728438c351a42f6dc5ad0c6e686", "title": "Pose-Aware Face Recognition in the Wild", "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf"}, {"id": "571b83f7fc01163383e6ca6a9791aea79cafa7dd", "title": "SeqFace: Make full use of sequence information for face recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.06524.pdf"}, {"id": "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "title": "A Light CNN for Deep Face Representation with Noisy Labels", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf"}, {"id": "856bd32ee16cc531bcb1814fbae2f66582e21cc7", "title": "Face Verification via Learned Representation on Feature-Rich Video Frames", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7850956"}, {"id": "0ed91520390ebdee13a0ac13d028f65d959bdc10", "title": "Hard Example Mining with Auxiliary Embeddings", "year": "", "pdf": "https://pdfs.semanticscholar.org/0ed9/1520390ebdee13a0ac13d028f65d959bdc10.pdf"}, {"id": "26c89f890da91119ffa16d5a23fba963257ef3fc", "title": "Tattoo Image Search at Scale: Joint Detection and Compact Representation Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00218.pdf"}, {"id": "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "title": "Pairwise Relational Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04976.pdf"}, {"id": "1bc214c39536c940b12c3a2a6b78cafcbfddb59a", "title": "Leveraging Gabor Phase for Face Identification in Controlled 
Scenarios", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/1bc2/14c39536c940b12c3a2a6b78cafcbfddb59a.pdf"}, {"id": "8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889", "title": "Noisy Face Image Sets Refining Collaborated with Discriminant Feature Space Learning", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.138"}, {"id": "1ffe20eb32dbc4fa85ac7844178937bba97f4bf0", "title": "Face Clustering: Representation and Pairwise Constraints", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.05067.pdf"}, {"id": "47e14fdc6685f0b3800f709c32e005068dfc8d47", "title": "Secure Face Matching Using Fully Homomorphic Encryption", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00577.pdf"}, {"id": "e13360cda1ebd6fa5c3f3386c0862f292e4dbee4", "title": "Range Loss for Deep Face Recognition with Long-tail", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e133/60cda1ebd6fa5c3f3386c0862f292e4dbee4.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "20e504782951e0c2979d9aec88c76334f7505393", "title": "Robust LSTM-Autoencoders for Face De-Occlusion in the Wild", "year": 2018, "pdf": "https://arxiv.org/pdf/1612.08534v1.pdf"}, {"id": "b6259115b819424de53bb92f64cc459dcb649f31", "title": "Learning Feature Representation for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078466"}, {"id": "f1280f76933ba8b7f4a6b8662580504f02bb4ab6", "title": "Gender Classification by Deep Learning on Millions of Weakly Labelled Images", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703"}, {"id": "8395cf3535a6628c3bdc9b8d0171568d551f5ff0", "title": "Entropy Non-increasing Games for the Improvement of Dataflow Programming", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf"}, {"id": "cc47368fe303c6cbda38caf5ac0e1d1c9d7e2a52", "title": "University Classroom Attendance Based on Deep Learning", "year": 2017, "pdf": null}, {"id": "77c7d8012fe4179a814c1241a37a2256361bc1a4", "title": "BGP Face Retrieval Based on Coding Pyramid", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8530228"}, {"id": "069bb452e015ef53f0ef30e9690e460ccc73cf03", "title": "Multicolumn Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09192.pdf"}, {"id": "0ad4a814b30e096ad0e027e458981f812c835aa0", "title": "Leveraging mid-level deep representations for predicting face attributes in the wild", "year": 2016, "pdf": "http://arxiv.org/pdf/1602.01827v1.pdf"}, {"id": "054738ce39920975b8dcc97e01b3b6cc0d0bdf32", "title": "Towards the design of an end-to-end automated system for image and video-based recognition", "year": 2016, "pdf": "https://doi.org/10.1109/ITA.2016.7888183"}, {"id": "fffefc1fb840da63e17428fd5de6e79feb726894", "title": "Fine-Grained Age Estimation in the wild with Attention LSTM Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10445.pdf"}, {"id": "cb38b4a5e517b4bcb00efbb361f4bdcbcf1dca2c", "title": "Learning towards Minimum Hyperspherical Energy", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.09298.pdf"}, {"id": "80097a879fceff2a9a955bf7613b0d3bfa68dc23", "title": "Active Self-Paced Learning for Cost-Effective and Progressive Face Identification", "year": "2018", "pdf": "https://arxiv.org/pdf/1701.03555.pdf"}, {"id": 
"ef559d5f02e43534168fbec86707915a70cd73a0", "title": "DeepInsight: Multi-Task Multi-Scale Deep Learning for Mental Disorder Diagnosis", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef55/9d5f02e43534168fbec86707915a70cd73a0.pdf"}, {"id": "036fac2b87cf04c3d93e8a59da618d56a483a97d", "title": "Query Adaptive Late Fusion for Image Retrieval", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.13103.pdf"}, {"id": "6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81", "title": "Structured Output SVM Prediction of Apparent Age, Gender and Smile from Deep Features", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.96"}, {"id": "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "title": "Minimum Margin Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06741.pdf"}, {"id": "040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d", "title": "Large-scale Bisample Learning on ID vs. Spot Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03018.pdf"}, {"id": "12e4545d07e1793df87520f384b37a015815d2f7", "title": "Age invariant face recognition: a survey on facial aging databases, techniques and effect of aging", "year": "2018", "pdf": "http://doi.org/10.1007/s10462-018-9661-z"}, {"id": "beab10d1bdb0c95b2f880a81a747f6dd17caa9c2", "title": "DeepDeblur: Fast one-step blurry face images restoration", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/beab/10d1bdb0c95b2f880a81a747f6dd17caa9c2.pdf"}, {"id": "6ac1dc59e823d924e797afaf5c4a960ed7106f2a", "title": "Deep Facial Expression Recognition: A Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.08348.pdf"}, {"id": "7aa4c16a8e1481629f16167dea313fe9256abb42", "title": "Multi-task learning for face identification and attribute estimation", "year": 2017, "pdf": "https://doi.org/10.1109/ICASSP.2017.7952703"}, {"id": "0e2ea7af369dbcaeb5e334b02dd9ba5271b10265", "title": "Multi-Level Feature Abstraction from Convolutional Neural Networks for Multimodal Biometric Identification", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.01332.pdf"}, {"id": "de0eb358b890d92e8f67592c6e23f0e3b2ba3f66", "title": "Inference-Based Similarity Search in Randomized Montgomery Domains for Privacy-Preserving Biometric Identification", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.01587.pdf"}, {"id": "57178b36c21fd7f4529ac6748614bb3374714e91", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217"}, {"id": "6eaf446dec00536858548fe7cc66025b70ce20eb", "title": "GP-GAN: Gender Preserving GAN for Synthesizing Faces from Landmarks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6eaf/446dec00536858548fe7cc66025b70ce20eb.pdf"}, {"id": "c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8", "title": "Age Estimation Guided Convolutional Neural Network for Age-Invariant Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811"}, {"id": "0c65226edb466204189b5aec8f1033542e2c17aa", "title": "A study of CNN outside of training conditions", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296997"}, {"id": "f0b30a9bb9740c2886d96fc44d6f35b8eacab4f3", "title": "Are You Sure You Want To Do That ? 
Classification with Interpretable Queries", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f0b3/0a9bb9740c2886d96fc44d6f35b8eacab4f3.pdf"}, {"id": "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "title": "Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "380d5138cadccc9b5b91c707ba0a9220b0f39271", "title": "Deep Imbalanced Learning for Face Recognition and Attribute Prediction", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00194.pdf"}, {"id": "2c052a1c77a3ec2604b3deb702d77c41418c7d3e", "title": "What Is the Challenge for Deep Learning in Unconstrained Face Recognition?", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373863"}, {"id": "0ee737085af468f264f57f052ea9b9b1f58d7222", "title": "SiGAN: Siamese Generative Adversarial Network for Identity-Preserving Face Hallucination", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08370.pdf"}, {"id": "a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d", "title": "High Performance Large Scale Face Recognition with Multi-cognition Softmax and Feature Retrieval", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265434"}, {"id": "bbf20adb59b7461e0d040e665bf64ae5f478eda0", "title": "Automated face swapping and its detection", "year": 2017, "pdf": null}, {"id": "d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e", "title": "A Lightened CNN for Deep Face Representation", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/d4e6/69d5d35fa0ca9f8d9a193c82d4153f5ffc4e.pdf"}, {"id": "fed8cc533037d7d925df572a440fd89f34d9c1fd", "title": "Simple Triplet Loss Based on Intra/Inter-Class Metric Learning for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194"}, {"id": "177d03c5851f7082cb023a20fa8a2cd1dfb59467", "title": "Difference networks and second-order difference networks", "year": 2017, "pdf": null}, {"id": "832a9584e85af1675d49ee35fd13283b21ce3a3f", "title": "Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00112.pdf"}, {"id": "72a55554b816b66a865a1ec1b4a5b17b5d3ba784", "title": "Real-Time Face Identification via CNN and Boosted Hashing Forest", "year": 2016, "pdf": "http://vislab.ucr.edu/Biometrics16/CVPRW_Vizilter.pdf"}, {"id": "8813368c6c14552539137aba2b6f8c55f561b75f", "title": "Trunk-Branch Ensemble Convolutional Neural Networks for Video-Based Face Recognition", "year": 2018, "pdf": "https://arxiv.org/pdf/1607.05427v1.pdf"}, {"id": "0db8e6eb861ed9a70305c1839eaef34f2c85bbaf", "title": "Towards Large-Pose Face Frontalization in the Wild", "year": 2017, "pdf": "https://arxiv.org/pdf/1704.06244v1.pdf"}, {"id": "91b7270b7f2a8a52df6a689f73d14986b2d48ba1", "title": "Improving Convolutional Neural Networks Via Compacting Features", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461618"}, {"id": "c3285a1d6ec6972156fea9e6dc9a8d88cd001617", "title": "Extreme 3D Face Reconstruction: Seeing Through Occlusions", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.05083.pdf"}, {"id": "6f22628d34a486d73c6b46eb071200a00e3abae3", "title": "Learning Pose-Aware Models for Pose-Invariant Face Recognition in the Wild.", 
"year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994497"}, {"id": "03f7041515d8a6dcb9170763d4f6debd50202c2b", "title": "Clustering Millions of Faces by Identity", "year": 2018, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf"}, {"id": "a8117a4733cce9148c35fb6888962f665ae65b1e", "title": "A Good Practice Towards Top Performance of Face Recognition: Transferred Deep Feature Fusion", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a811/7a4733cce9148c35fb6888962f665ae65b1e.pdf"}, {"id": "ba6769c165967c8dcb11fe5e0be2153ddbe99c7e", "title": "Deep learning for gender recognition", "year": 2015, "pdf": null}, {"id": "7d40e7e5c01bd551edf65902386401e1b8b8014b", "title": "Channel-Level Acceleration of Deep Face Representations", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7303876"}, {"id": "3933e323653ff27e68c3458d245b47e3e37f52fd", "title": "Evaluation of a 3 D-aided Pose Invariant 2 D Face Recognition System", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/3933/e323653ff27e68c3458d245b47e3e37f52fd.pdf"}, {"id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "title": "Labeled Faces in the Wild: A Survey", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf"}, {"id": "59fc69b3bc4759eef1347161e1248e886702f8f7", "title": "Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf"}, {"id": "3d1b59fa0252e72afd8b107a1dac47cc2d5f8bb2", "title": "Recognizing Minimal Facial Sketch by Generating Photorealistic Faces With the Guidance of Descriptive Attributes", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461385"}, {"id": "77d31d2ec25df44781d999d6ff980183093fb3de", "title": "The Multiverse Loss for Robust Transfer Learning", "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/supplemental/Littwin_The_Multiverse_Loss_2016_CVPR_supplemental.pdf"}, {"id": "48499deeaa1e31ac22c901d115b8b9867f89f952", "title": "Interim Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf"}, {"id": "dbced84d839165d9b494982449aa2eb9109b8467", "title": "Extreme 3D Face Reconstruction: Looking Past Occlusions", "year": "2017", "pdf": null}, {"id": "e988be047b28ba3b2f1e4cdba3e8c94026139fcf", "title": "Multi-Task Convolutional Neural Network for Pose-Invariant Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1702.04710.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/celeba.json b/site/datasets/unknown/celeba.json new file mode 100644 index 00000000..07b79f9d --- /dev/null +++ b/site/datasets/unknown/celeba.json @@ -0,0 +1 @@ +{"id": "6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "paper": {"paper_id": "6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "key": "celeba", "title": "Deep Learning Face Attributes in the Wild", "year": 2015, "pdf": "http://arxiv.org/pdf/1411.7766v2.pdf", "address": {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}, "name": "CelebA"}, "citations": [{"id": "17c0d99171efc957b88c31a465c59485ab033234", "title": "To learn image super-resolution, use a GAN to learn how to do image degradation first", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11458.pdf"}, {"id": "95ea564bd983129ddb5535a6741e72bb1162c779", "title": "Multi-Task Learning by Deep Collaboration and Application in Facial Landmark Detection", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00111.pdf"}, {"id": "f881d2a04de838c8950a279e1ed8c0f9886452af", "title": "Multi-Stage Variational Auto-Encoders for Coarse-to-Fine Image Generation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f881/d2a04de838c8950a279e1ed8c0f9886452af.pdf"}, {"id": "f89e5a8800b318fa03289b5cc67df54b956875b4", "title": "Do GANs actually learn the distribution? An empirical study", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.08224.pdf"}, {"id": "24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852", "title": "Deep Multi-Task Learning for Joint Prediction of Heterogeneous Face Attributes", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.30"}, {"id": "e2afea1a84a5bdbcb64d5ceadaa2249195e1fd82", "title": "DOOM Level Generation Using Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09154.pdf"}, {"id": "7ef0cc4f3f7566f96f168123bac1e07053a939b2", "title": "Triangular Similarity Metric Learning: a Siamese Architecture Approach. 
( L'apprentissage de similarit\u00e9 triangulaire en utilisant des r\u00e9seaux siamois)", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/e735/b8212d8a81909753291d5d06789a917014f8.pdf"}, {"id": "1fd54172f7388cd83ed78ff9165519296de5cf20", "title": "Changing the Image Memorability: From Basic Photo Editing to GANs", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.03825.pdf"}, {"id": "5c9e0f15222424022201422c7de534e06575d3d6", "title": "An Improved Method for Semantic Image Inpainting with GANs: Progressive Inpainting", "year": "2018", "pdf": "http://doi.org/10.1007/s11063-018-9877-6"}, {"id": "b69badabc3fddc9710faa44c530473397303b0b9", "title": "Unsupervised Image-to-Image Translation Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.00848.pdf"}, {"id": "7c0c9ab92d49941089979c1e344fe66efc873bdd", "title": "Generative Adversarial Examples", "year": "2018", "pdf": null}, {"id": "7dab6fbf42f82f0f5730fc902f72c3fb628ef2f0", "title": "An Unsupervised Approach to Solving Inverse Problems using Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07281.pdf"}, {"id": "40c3f90f0abf842ee6f6009c414fde4f86b82005", "title": "Synchronization Detection and Recovery of Steganographic Messages with Adversarial Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/40c3/f90f0abf842ee6f6009c414fde4f86b82005.pdf"}, {"id": "fa60521dabd2b64137392b4885e4d989f4b86430", "title": "Physics-Based Generative Adversarial Models for Image Restoration and Beyond", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.00605.pdf"}, {"id": "e72c5fb54c3d14404ebd1bf993e51d0056f6c429", "title": "Tempered Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.04374.pdf"}, {"id": "a90226c41b79f8b06007609f39f82757073641e2", "title": "\u0392-vae: Learning Basic Visual Concepts with a Constrained Variational Framework", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/a902/26c41b79f8b06007609f39f82757073641e2.pdf"}, {"id": "cb30c1370885033bc833bc7ef90a25ee0900c461", "title": "FaceOff: Anonymizing Videos in the Operating Rooms", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04440.pdf"}, {"id": "833cd4265bd8162d3cfb483ce8f31eaef28e7a2e", "title": "Towards Effective Gans", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/833c/d4265bd8162d3cfb483ce8f31eaef28e7a2e.pdf"}, {"id": "c86afba9c77a9b1085ccc6c44c36fa3a1fdb51c5", "title": "New Losses for Generative Adversarial Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.01290.pdf"}, {"id": "b50f2ad8d7f08f99d4ba198120120f599f98095e", "title": "Spatiotemporal data fusion for precipitation nowcasting", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b50f/2ad8d7f08f99d4ba198120120f599f98095e.pdf"}, {"id": "a12bc1b9bcd81cc0a8f7209f0538c5f356e5f4d3", "title": "Hallucinating Face Image by Regularization Models in High-Resolution Feature Space", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8310603"}, {"id": "2785c5769489825671a6138fdf0537fcd444038a", "title": "A Deep Cascade Network for Unaligned Face Attribute Classification", "year": "2018", "pdf": "https://arxiv.org/pdf/1709.03851.pdf"}, {"id": "284b5dafe6d8d7552794ccd2efb4eabb12dc3512", "title": "Efficient and accurate inversion of multiple scattering with deep learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/284b/5dafe6d8d7552794ccd2efb4eabb12dc3512.pdf"}, {"id": "40a63746a710baf4a694fd5a4dd8b5a3d9fc2846", "title": "Invertible Conditional GANs for image editing", "year": 
2016, "pdf": "http://pdfs.semanticscholar.org/40a6/3746a710baf4a694fd5a4dd8b5a3d9fc2846.pdf"}, {"id": "02aff7faf2f6b775844809805424417eed30f440", "title": "A Tale of Three Probabilistic Families: Discriminative, Descriptive and Generative Models", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.04261.pdf"}, {"id": "7bce03583d85b307d5b84872e2ff147661a70158", "title": "Facial Expression Recognition in the Wild: A Cycle-Consistent Adversarial Attention Transfer Approach", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240574"}, {"id": "96390f95a73a6bd495728b6cd2a97554ef187f76", "title": "Pan Olympus : Sensor Privacy through Utility Aware", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/9639/0f95a73a6bd495728b6cd2a97554ef187f76.pdf"}, {"id": "744fe47157477235032f7bb3777800f9f2f45e52", "title": "Progressive Growing of GANs for Improved Quality, Stability, and Variation", "year": "2017", "pdf": "https://arxiv.org/pdf/1710.10196.pdf"}, {"id": "ba397fe5d4f0beaa7370b88e9875dbba19aa7bfc", "title": "SmileNet: Registration-Free Smiling Face Detection In The Wild", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265396"}, {"id": "47e14fdc6685f0b3800f709c32e005068dfc8d47", "title": "Secure Face Matching Using Fully Homomorphic Encryption", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00577.pdf"}, {"id": "46471a285b1d13530f1885622d4551b48c19fc67", "title": "Generating Artificial Data for Private Deep Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.03148.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "0d1a87dad1e4538cc7bd3c923767c8bf1a9b779f", "title": "The Riemannian Geometry of Deep Generative Models", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.08014.pdf"}, {"id": "f2b2d50d6ca72666bab34e0f101ae1b18b434925", "title": "High-Fidelity Monocular Face Reconstruction based on an Unsupervised Model-based Face Autoencoder.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f2b2/d50d6ca72666bab34e0f101ae1b18b434925.pdf"}, {"id": "1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee", "title": "Deep fusion of visual signatures for client-server facial analysis", "year": 2016, "pdf": "https://arxiv.org/pdf/1611.00142v2.pdf"}, {"id": "dde5125baefa1141f1ed50479a3fd67c528a965f", "title": "Synthesizing Normalized Faces from Facial Identity Features", "year": "2017", "pdf": "https://arxiv.org/pdf/1701.04851.pdf"}, {"id": "fd4537b92ab9fa7c653e9e5b9c4f815914a498c0", "title": "One-Sided Unsupervised Domain Mapping", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.00826.pdf"}, {"id": "290c8196341bbac80efc8c89af5fc60e1b8c80e6", "title": "Learning deep representations by mutual information estimation and maximization", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.06670.pdf"}, {"id": "5a4a53339068eebd1544b9f430098f2f132f641b", "title": "Hierarchical Disentangled Representations", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/5a4a/53339068eebd1544b9f430098f2f132f641b.pdf"}, {"id": "c86e6ed734d3aa967deae00df003557b6e937d3d", "title": "Generative Adversarial Networks with Decoder-Encoder Output Noise", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.03923.pdf"}, {"id": "81d67fa2f5eb76c9b0afb2d887e95ba78b6e46c9", "title": "Learning Implicit Generative Models with the Method of Learned Moments", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.11006.pdf"}, {"id": 
"04cdc847f3b10d894582969feee0f37fbd3745e5", "title": "Compressed Sensing with Deep Image Prior and Learned Regularization", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06438.pdf"}, {"id": "784a83437b3dba49c0d7ccc10ac40497b84661a5", "title": "Generative Attribute Controller with Conditional Filtered Generative Adversarial Networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100224"}, {"id": "a81769a36c9ed7b6146a408eb253eb8e0d3ad41e", "title": "Super-Fine Attributes with Crowd Prototyping.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a817/69a36c9ed7b6146a408eb253eb8e0d3ad41e.pdf"}, {"id": "6075c07ecb29d551ffa474c3eca45f2da5fd5007", "title": "Shallow convolutional neural network for eyeglasses detection in facial images", "year": 2017, "pdf": null}, {"id": "41dd2ca8929bfdae49a4bf85de74df4723ef9c3b", "title": "Correction by Projection: Denoising Images with Generative Adversarial Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/41dd/2ca8929bfdae49a4bf85de74df4723ef9c3b.pdf"}, {"id": "3f6a6050609ba205ec94b8af186a9dca60a8f65e", "title": "Harmonizing Maximum Likelihood with Gans", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3f6a/6050609ba205ec94b8af186a9dca60a8f65e.pdf"}, {"id": "a59e338fec32adee012e31cdb0513ec20d6c8232", "title": "Phase Retrieval Under a Generative Prior", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.04261.pdf"}, {"id": "e909b9e0bbfc37d0b99acad5014e977daac7e2bd", "title": "Adversarial Training of Variational Auto-Encoders for High Fidelity Image Generation", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10323.pdf"}, {"id": "12417ed7ae81fb4e6c07f501ace9ea463349481b", "title": "Pairwise Augmented GANs with Adversarial Reconstruction Loss", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.04920.pdf"}, {"id": "b3f18013079e0535dcda045ac5145c201287aec3", "title": "Multi-Label Dilated Recurrent Network for Sequential Face Alignment", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8486438"}, {"id": "834f5ab0cb374b13a6e19198d550e7a32901a4b2", "title": "Face Translation between Images and Videos using Identity-aware CycleGAN", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/834f/5ab0cb374b13a6e19198d550e7a32901a4b2.pdf"}, {"id": "250449a9827e125d6354f019fc7bc6205c5fd549", "title": "Adversarial Reconstruction Loss", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2504/49a9827e125d6354f019fc7bc6205c5fd549.pdf"}, {"id": "4131aa28d640d17e1d63ca82e55cc0b280db0737", "title": "Coulomb Gans: Provably Optimal Nash Equi-", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/74a1/e28dd2c03076124282482074e10bb02bc643.pdf"}, {"id": "e4fb693d8b2755b8e989e0c59b28db3c75591503", "title": "Classification of E-Commerce-Related Images Using Hierarchical Classification with Deep Neural Networks", "year": 2017, "pdf": null}, {"id": "4ed0be0b5d67cff63461ba79f2a7928d652cf310", "title": "Threat of Adversarial Attacks on Deep Learning in Computer Vision: A Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.00553.pdf"}, {"id": "af6af58ba12920762638e1d0b8310a0d9961b7be", "title": "Sketch-to-Image Generation Using Deep Contextual Completion", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/af6a/f58ba12920762638e1d0b8310a0d9961b7be.pdf"}, {"id": "4cfdd0c8313ac4f92845dcd658115beb115b97ce", "title": "Multi-Task Learning as Multi-Objective Optimization", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.04650.pdf"}, {"id": 
"775c15a5dfca426d53c634668e58dd5d3314ea89", "title": "Image Quality-aware Deep Networks Ensemble for Efficient Gender Recognition in the Wild", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/775c/15a5dfca426d53c634668e58dd5d3314ea89.pdf"}, {"id": "cb8b2db657cd6b6ccac13b56e2ca62b7d88eda68", "title": "Log Hyperbolic Cosine Loss Improves Varia-", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4b89/03b4eb3a60c0fb01c229e3192a5f9159d460.pdf"}, {"id": "fdaf65b314faee97220162980e76dbc8f32db9d6", "title": "Face recognition using both visible light image and near-infrared image and a deep network", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/fdaf/65b314faee97220162980e76dbc8f32db9d6.pdf"}, {"id": "18ec3b37a33db39ac0633677e944cc81be58f7ba", "title": "Cooperative Training of Descriptor and Generator Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/18ec/3b37a33db39ac0633677e944cc81be58f7ba.pdf"}, {"id": "5fd147f57fc087b35650f7f3891d457e4c745d48", "title": "Coulomb GANs: Provably Optimal Nash Equilibria via Potential Fields", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08819.pdf"}, {"id": "d87ccfc42cf6a72821d357aab0990e946918350b", "title": "Exploiting the Potential of Standard Convolutional Autoencoders for Image Restoration by Evolutionary Search", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.00370.pdf"}, {"id": "2b507f659b341ed0f23106446de8e4322f4a3f7e", "title": "Deep Identity-aware Transfer of Facial Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2b50/7f659b341ed0f23106446de8e4322f4a3f7e.pdf"}, {"id": "551b75dd57829b584de5f51b63426efac81018db", "title": "Recision T Raining", "year": "2017", "pdf": null}, {"id": "79fc3c10ce0d0f48b25c8cf460048087c97e2e90", "title": "Variational Bi-domain Triplet Autoencoder", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08672.pdf"}, {"id": "b0103d264f756144f8acc1994f2327699e280652", "title": "Convolutional Gaussian Mixture Models with Application to Compressive Sensing", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450817"}, {"id": "257e61e6b38ae23b7ddce9907c05b0e78be4d79d", "title": "The LORACs prior for VAEs: Letting the Trees Speak for the Data", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06891.pdf"}, {"id": "641fd2edcf93fa29181952356e93a83a26012aa2", "title": "Following are some examples from CIFAR dataset : Goal : To alter the training criteria to obtain \u2018 objectness \u2019 in the synthesis of images", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/424a/f8e96c76de69153a0d528cf7d41d5c69a1a1.pdf"}, {"id": "2da1a80955df1612766ffdf63916a6a374780161", "title": "Generating steganographic images via adversarial training", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.00371.pdf"}, {"id": "c3955d74f2a084a8ddcbd7e73952c326e81804b2", "title": "Mutual Information Neural Estimation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c395/5d74f2a084a8ddcbd7e73952c326e81804b2.pdf"}, {"id": "cfcf66e4b22dc7671a5941e94e9d4afae75ba2f8", "title": "The Cramer Distance as a Solution to Biased Wasserstein Gradients", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/cfcf/66e4b22dc7671a5941e94e9d4afae75ba2f8.pdf"}, {"id": "7da7678882d06a1f93636f58fe89635da5b1dd0c", "title": "EnhanceNet: Single Image Super-Resolution Through Automated Texture Synthesis", "year": "2017", "pdf": "https://arxiv.org/pdf/1612.07919.pdf"}, {"id": "fcc6fd9b243474cd96d5a7f4a974f0ef85e7ddf7", "title": "InclusiveFaceNet: Improving Face Attribute 
Detection with Race and Gender Diversity", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.00193.pdf"}, {"id": "4a855d86574c9bd0a8cfc522bc1c77164819c0bc", "title": "PixelCNN Models with Auxiliary Variables for Natural Image Modeling", "year": "2017", "pdf": "https://arxiv.org/pdf/1612.08185.pdf"}, {"id": "e75a2c37940a0f4e8b9f8d40d059ae4da2c443dd", "title": "Improved Boundary Equilibrium Generative Adversarial Networks", "year": 2018, "pdf": "https://doi.org/10.1109/ACCESS.2018.2804278"}, {"id": "a75ee7f4c4130ef36d21582d5758f953dba03a01", "title": "Human face attributes prediction with Deep Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/a75e/e7f4c4130ef36d21582d5758f953dba03a01.pdf"}, {"id": "4941f92222d660f9b60791ba95796e51a7157077", "title": "Conditional CycleGAN for Attribute Guided Face Image Generation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4941/f92222d660f9b60791ba95796e51a7157077.pdf"}, {"id": "34a4c733bc2b53253dbebe67f1af83b969c2e657", "title": "Learning Cross-Domain Disentangled Deep Representation with Supervision from A Single Domain", "year": "2017", "pdf": null}, {"id": "b0c3bc3e3ca143444f5193735f2aad89d1776276", "title": "Training Generative Reversible Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.01610.pdf"}, {"id": "8e24db957be2b643db464cc566bfabc650f1ffac", "title": "Geometry-Aware Generative Adverserial Networks", "year": "2017", "pdf": null}, {"id": "3555d849b85e9416e9496c9976084b0e692b63cd", "title": "Towards Effective Gans", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3555/d849b85e9416e9496c9976084b0e692b63cd.pdf"}, {"id": "0b783e750da34c61ea404be8bc40788fd66c867d", "title": "Sliced Wasserstein Generative Models", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a385/8e0b9f754f3aa42f716e04a5cabdb986d2b0.pdf"}, {"id": "ff83aade985b981fbf2233efbbd749600e97454c", "title": "Towards Understanding Adversarial Learning for Joint Distribution Matching", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01215.pdf"}, {"id": "2f587ab6694fdcfe6bd2977120ebeb758e28d77f", "title": "Coupled Generative Adversarial Nets", "year": "", "pdf": "http://pdfs.semanticscholar.org/2f58/7ab6694fdcfe6bd2977120ebeb758e28d77f.pdf"}, {"id": "76ec5c774bb3fd04f9e68864a411286536a544c5", "title": "Latent Constraints: Learning to Generate Conditionally from Unconditional Generative Models", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/76ec/5c774bb3fd04f9e68864a411286536a544c5.pdf"}, {"id": "b348d5c7ac93d1148265284d71234e200c9c5f02", "title": "GibbsNet: Iterative Adversarial Inference for Deep Graphical Models", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.04120.pdf"}, {"id": "26690f2548c6dbf630de202b40dec417b20c9b6c", "title": "Variational Inference of Disentangled Latent Concepts from Unlabeled Observations", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2669/0f2548c6dbf630de202b40dec417b20c9b6c.pdf"}, {"id": "b2504b0b2a7e06eab02a3584dd46d94a3f05ffdf", "title": "Conditional Neural Processes", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.01613.pdf"}, {"id": "6d83c33e917e63a2c1a9f23fdd58f01a95f0c87a", "title": "Enhance deep learning performance in face recognition", "year": 2017, "pdf": null}, {"id": "0e8760fc198a7e7c9f4193478c0e0700950a86cd", "title": "Brute-Force Facial Landmark Analysis With A 140, 000-Way Classifier", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0e87/60fc198a7e7c9f4193478c0e0700950a86cd.pdf"}, {"id": "385750bcf95036c808d63db0e0b14768463ff4c6", "title": "Autoencoding beyond pixels 
using a learned similarity metric", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3857/50bcf95036c808d63db0e0b14768463ff4c6.pdf"}, {"id": "7f217ff1f3c21c84ed116d32e3b8d1509a306fbd", "title": "Direct Optimization through arg max for Discrete Variational Auto-Encoder", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.02867.pdf"}, {"id": "0677dd5377895b3c61cea0e6a143f38b84f1ebd7", "title": "Super-Resolution via Deep Learning", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0677/dd5377895b3c61cea0e6a143f38b84f1ebd7.pdf"}, {"id": "886a50f269ace4b140ddee9d4c7277743b27e250", "title": "Unsupervised Learning of 3D Model Reconstruction from Hand-Drawn Sketches", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240699"}, {"id": "174ddb6379b91a0e799e9988d0e522a5af18f91d", "title": "ChatPainter: Improving Text to Image Generation using Dialogue", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/174d/db6379b91a0e799e9988d0e522a5af18f91d.pdf"}, {"id": "bd17d6ba5525dec8762dbaacf6cc3e0cc3f5ff90", "title": "Necst: Neural Joint Source-channel Coding", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/bd17/d6ba5525dec8762dbaacf6cc3e0cc3f5ff90.pdf"}, {"id": "907fbe706ec14101978a63c6252e0d75e657e8dd", "title": "The Unreasonable Effectiveness of Texture Transfer for Single Image Super-resolution", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.00043.pdf"}, {"id": "485f8b28dcb7a5ffc98beb49fcbb50cf0a0b6331", "title": "A Latent Space Understandable Generative Adversarial Network: SelfExGAN", "year": 2017, "pdf": "https://doi.org/10.1109/DICTA.2017.8227390"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "68b01afed57ed7130d993dffc03dcbfa36d4e038", "title": "Adversarial Learning with Local Coordinate Coding", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.04895.pdf"}, {"id": "4539582f25f4316c1655fbb308ee8a5b11649e38", "title": "Stable and improved generative adversarial nets (GANS): A constructive survey", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296606"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "d0fdf0f3f47d9f9d11e84961573b324c51518f34", "title": "Painting completion with generative translation models", "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-6761-3"}, {"id": "8699268ee81a7472a0807c1d3b1db0d0ab05f40d", "title": "Channel-Recurrent Autoencoding for Image Modeling", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8699/268ee81a7472a0807c1d3b1db0d0ab05f40d.pdf"}, {"id": "626c12d6ccb1405c97beca496a3456edbf351643", "title": "Conditional Variance Penalties and Domain Shift Robustness", "year": "2017", "pdf": "https://arxiv.org/pdf/1710.11469.pdf"}, {"id": "1450296fb936d666f2f11454cc8f0108e2306741", "title": "Learning to Discover Cross-Domain Relations with Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/1450/296fb936d666f2f11454cc8f0108e2306741.pdf"}, {"id": "df999184b1bb5691cd260b2b77df7ef00c0fe7b1", "title": "On Latent Distributions Without Finite Mean in Generative Models", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.01670.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for 
training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "380d5138cadccc9b5b91c707ba0a9220b0f39271", "title": "Deep Imbalanced Learning for Face Recognition and Attribute Prediction", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00194.pdf"}, {"id": "fed9e971e042b40cc659aca6e338d79dc1d4b59c", "title": "Grouping-by-id: Guarding against Adversar-", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/fed9/e971e042b40cc659aca6e338d79dc1d4b59c.pdf"}, {"id": "bf15ba4db09fd805763738ec2cb48c09481785dd", "title": "Training Deep Neural Network in Limited Precision", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.05486.pdf"}, {"id": "4a45b8f8decc178305af06d758ac7428a9070fad", "title": "Augmented CycleGAN: Learning Many-to-Many Mappings from Unpaired Data", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.10151.pdf"}, {"id": "11f732fe8f127c393cc8404ee8db2b3e85dd3d59", "title": "Disentangling Latent Factors with Whitening", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.03444.pdf"}, {"id": "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "title": "Do They All Look the Same? Deciphering Chinese, Japanese and Koreans by Fine-Grained Deep Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf"}, {"id": "8326d3e57796dad294ab1c14a0688221550098b6", "title": "ABC-GAN: Adaptive Blur and Control for improved training stability of Generative Adversarial Networks", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/8326/d3e57796dad294ab1c14a0688221550098b6.pdf"}, {"id": "ba38c4f083ccde8a932952ba865e57d930476b1b", "title": "Domain-Adaptive generative adversarial networks for sketch-to-photo inversion", "year": 2017, "pdf": "https://doi.org/10.1109/MLSP.2017.8168181"}, {"id": "6b95a3dbec92071c8552576930e69455c70e529c", "title": "BEGAN: Boundary Equilibrium Generative Adversarial Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.10717.pdf"}, {"id": "fd6d2e4f939b8d804a6b5908bded8f1ad2563e38", "title": "Stabilizing GAN Training with Multiple Random Projections", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/fd6d/2e4f939b8d804a6b5908bded8f1ad2563e38.pdf"}, {"id": "a5531b5626c1ee3b6f9aed281a98338439d06d12", "title": "Multichannel Attention Network for Analyzing Visual Behavior in Public Speaking", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.06830.pdf"}, {"id": "366d20f8fd25b4fe4f7dc95068abc6c6cabe1194", "title": "Are facial attributes adversarially robust?", "year": 2016, "pdf": "http://arxiv.org/pdf/1605.05411v1.pdf"}, {"id": "f8a2a6b821a092ac43acd4e7366fe7c1e9285317", "title": "Attribute-controlled face photo synthesis from simple line drawing", "year": "2017", "pdf": "https://arxiv.org/pdf/1702.02805.pdf"}, {"id": "1ec59aece51a698bce34f393cf6474b926fd89ad", "title": "Exemplar Guided Unsupervised Image-to-Image Translation", "year": "2018", "pdf": null}, {"id": "8134b052a9aedd573dd16649a611f68b48e30cb2", "title": "InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8134/b052a9aedd573dd16649a611f68b48e30cb2.pdf"}, {"id": "8395cf3535a6628c3bdc9b8d0171568d551f5ff0", "title": "Entropy Non-increasing Games for the Improvement of Dataflow Programming", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf"}, {"id": "ecf2ba5ea183a6be63b57543a19dd41e8017daaf", "title": "Cooperative Learning of Energy-Based Model and Latent Variable Model via MCMC Teaching", "year": 
"2018", "pdf": "https://pdfs.semanticscholar.org/58f8/90f76930b49ed517ff03fbf57ab1fc0c5608.pdf"}, {"id": "404c7839afe2fec48a06f83d2a532c05ad8ba0d3", "title": "Vehicle Classification using Transferable Deep Neural Network Features", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/404c/7839afe2fec48a06f83d2a532c05ad8ba0d3.pdf"}, {"id": "b4fe9594e1de682e7270645ba95ab64727b6632e", "title": "Generative Adversarial Positive-Unlabelled Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.08054.pdf"}, {"id": "d50c6d22449cc9170ab868b42f8c72f8d31f9b6c", "title": "Dynamic Multi-Task Learning with Convolutional Neural Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d50c/6d22449cc9170ab868b42f8c72f8d31f9b6c.pdf"}, {"id": "79815f31f42708fd59da345f8fa79f635a070730", "title": "Autoregressive Quantile Networks for Generative Modeling", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05575.pdf"}, {"id": "9939498315777b40bed9150d8940fc1ac340e8ba", "title": "ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583"}, {"id": "76b11c281ac47fe6d95e124673a408ee9eb568e3", "title": "Real-time Multi View Face Detection and Pose Estimation Aishwarya", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/76b1/1c281ac47fe6d95e124673a408ee9eb568e3.pdf"}, {"id": "312afff739d1e0fcd3410adf78be1c66b3480396", "title": "Facial Attributes: Accuracy and Adversarial Robustness", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/312a/fff739d1e0fcd3410adf78be1c66b3480396.pdf"}, {"id": "04d09bed8b05ed10d25c1cc2ab47381b0ee34c2f", "title": "YesilcamGAN: Automatic face translation to Yesilcam artists", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404362"}, {"id": "c35724d227eb1e3d680333469fb9b94c677e871f", "title": "Multi-view Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c357/24d227eb1e3d680333469fb9b94c677e871f.pdf"}, {"id": "c8855bebdaa985dfc4c1a07e5f74a0e29787e47e", "title": "Multi-label Object Attribute Classification using a Convolutional Neural Network", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.04309.pdf"}, {"id": "8bff7353fa4f75629ea418ca8db60477a751db93", "title": "Invariance of Weight Distributions in Rectified MLPs", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.09090.pdf"}, {"id": "ab3b196c5386f7ec2d05870eeb8872c8b8e33d77", "title": "Unconditional Generative Models", "year": "2017", "pdf": null}, {"id": "8e723e8a3a5a9ea258591d384232e0251f842a1c", "title": "Twin-GAN - Unpaired Cross-Domain Image Translation with Weight-Sharing GANs", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.00946.pdf"}, {"id": "056be8a896f71be4a1dee67b01f4d59e3e982304", "title": "Generative Models of Visually Grounded Imagination", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/056b/e8a896f71be4a1dee67b01f4d59e3e982304.pdf"}, {"id": "72edc24c67c34b5f2c98086a689bf0f3591e393d", "title": "An Introduction to Image Synthesis with Generative Adversarial Nets", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/72ed/c24c67c34b5f2c98086a689bf0f3591e393d.pdf"}, {"id": "4d3cbc5799d7963477da279dae9a08ac4d459157", "title": "Deep Learning for Nonlinear Diffractive Imaging", "year": "2018", "pdf": null}, {"id": "a8db91308f59bc9452e87fc553eecea67632c443", "title": "Energy-relaxed Wasserstein GANs(EnergyWGAN): Towards More Stable and High Resolution Image Generation", "year": "2017", "pdf": 
null}, {"id": "f3356570afde9002601a46395d565031945c7a5a", "title": "Occlusion-aware face inpainting via generative adversarial networks", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296472"}, {"id": "9fb372fd2fb79571de1cc388154d4a3f0547d440", "title": "PBGen: Partial Binarization of Deconvolution-Based Generators for Edge Intelligence", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9fb3/72fd2fb79571de1cc388154d4a3f0547d440.pdf"}, {"id": "7fa62c091a14830ae256dc00b512f7d4b4cf5b94", "title": "Stabilizing GAN Training with Multiple Random Projections", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7fa6/2c091a14830ae256dc00b512f7d4b4cf5b94.pdf"}, {"id": "6f9873e2a7bc279c4f0a45c1a6e831ef3ba78ae7", "title": "Improving GAN Training via Binarized Representation Entropy (BRE) Regularization", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.03644.pdf"}, {"id": "318c4c25d86511690cc5df7b041a6392e8cc4ea8", "title": "Fashion-Gen: The Generative Fashion Dataset and Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08317.pdf"}, {"id": "9aab33ce8d6786b3b77900a9b25f5f4577cea461", "title": "Automatic Semantic Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961739"}, {"id": "7b6f0c4b22aee0cb4987cba9df121d4076fac5a5", "title": "On Learning 3D Face Morphable Model from In-the-wild Images", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.09560.pdf"}, {"id": "74a1e28dd2c03076124282482074e10bb02bc643", "title": "Coulomb Gans: Provably Optimal Nash Equi-", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/74a1/e28dd2c03076124282482074e10bb02bc643.pdf"}, {"id": "c8adbe00b5661ab9b3726d01c6842c0d72c8d997", "title": "Deep Architectures for Face Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c8ad/be00b5661ab9b3726d01c6842c0d72c8d997.pdf"}, {"id": "bea185a15d5df7bbfce83bc684c316412703efbb", "title": "Pixelnn: Example-based Image Synthesis", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/bea1/85a15d5df7bbfce83bc684c316412703efbb.pdf"}, {"id": "60bc358296ae11ac8f11286bba0a49ac7e797d26", "title": "Diverse Image-to-Image Translation via Disentangled Representations", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.00948.pdf"}, {"id": "9063b17ccfdf73fc789d01d3c44c451244638528", "title": "Detecting Both Machine and Human Created Fake Face Images In the Wild", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3267367"}, {"id": "fc3e097ea7dd5daa7d314ecebe7faad9af5e62fb", "title": "Variational Inference and Model Selection with Generalized Evidence Bounds", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fc3e/097ea7dd5daa7d314ecebe7faad9af5e62fb.pdf"}, {"id": "4fec382efed4e08a36fafa3710b97f0b20de1ebe", "title": "Binarized Representation Entropy (bre) Regularization", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/4fec/382efed4e08a36fafa3710b97f0b20de1ebe.pdf"}, {"id": "9b19be86280c8dbb3fdccc24297449290bd2b6aa", "title": "Robust Compressive Phase Retrieval via Deep Generative Priors", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05854.pdf"}, {"id": "85cad2b23e2ed7098841285bae74aafbff921659", "title": "Pa-gan: Improving Gan Training by Progressive Augmentation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/85ca/d2b23e2ed7098841285bae74aafbff921659.pdf"}, {"id": "e667ba14fd3ea15491ad7c7f2f7e3622d231eeae", "title": "Face verification using convolutional neural networks with Siamese architecture", "year": 2017, "pdf": null}, {"id": 
"341de07abfb89bf78f3a72513c8bce40d654e0a3", "title": "Sparse and Deep Generalizations of the FRAME Model", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4d54/360c1eb3c7f3485ee450e7979aef7ec8019e.pdf"}, {"id": "b63041d05b78a66724fbcb2803508999bf885d6b", "title": "Deep Sets", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b630/41d05b78a66724fbcb2803508999bf885d6b.pdf"}, {"id": "88726ee727d38c7a101f79412ff1cfc9b0e35f04", "title": "Graph-Regularized Locality-Constrained Joint Dictionary and Residual Learning for Face Sketch Synthesis", "year": "2019", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8467342"}, {"id": "7c2494c8b59a76404996e4b34889da36d140dd4a", "title": "Automatic makeup based on generative adversarial nets", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240903"}, {"id": "e22cf1ca10c11991c2a43007e37ca652d8f0d814", "title": "A Biologically Inspired Visual Working Memory", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e22c/f1ca10c11991c2a43007e37ca652d8f0d814.pdf"}, {"id": "fed8cc533037d7d925df572a440fd89f34d9c1fd", "title": "Simple Triplet Loss Based on Intra/Inter-Class Metric Learning for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194"}, {"id": "fd96432675911a702b8a4ce857b7c8619498bf9f", "title": "Improved Face Detection and Alignment using Cascade Deep Convolutional Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/fd96/432675911a702b8a4ce857b7c8619498bf9f.pdf"}, {"id": "16f79b8917f53eff7da88c6cde9300cff1572eb8", "title": "Face Hallucination Based on Key Parts Enhancement", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462170"}, {"id": "947399fef66bd8c536c6f784a0501b34e4e094bf", "title": "Towards Recovery of Conditional Vectors from Conditional Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9473/99fef66bd8c536c6f784a0501b34e4e094bf.pdf"}, {"id": "bb97664df153ac563e46ec2233346129cafe601b", "title": "A study on the use of Boundary Equilibrium GAN for Approximate Frontalization of Unconstrained Faces to aid in Surveillance", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.05611.pdf"}, {"id": "22bebedc1a5f3556cb4f577bdbe032299a2865e8", "title": "Effective training of convolutional neural networks for face-based gender and age prediction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf"}, {"id": "8d9067da4ba5c57643ee7a84cd5c5d5674384937", "title": "Sorting out Lipschitz function approximation", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.05381.pdf"}, {"id": "a47ac8569ab1970740cff9f1643f77e9143a62d4", "title": "Associative Compression Networks for Representation Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.02476.pdf"}, {"id": "d80564cea654d11b52c0008891a0fd2988112049", "title": "Semi-supervised Conditional GANs", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.05789.pdf"}, {"id": "372bf2716c53e353be6c3f027493f1a40edb6640", "title": "MINE: Mutual Information Neural Estimation", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.04062.pdf"}, {"id": "65ec52a3e0a0f6a46fd140ff83bb82d7d02a2d45", "title": "Learning Hierarchical Features from Generative Models", "year": "2017", "pdf": "https://arxiv.org/pdf/1702.08396.pdf"}, {"id": "5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd", "title": "Supervised COSMOS Autoencoder: Learning Beyond the Euclidean Loss!", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06221.pdf"}, 
{"id": "66f55dc04aaf4eefdecef202211ad7563f7a703b", "title": "Synthesizing Programs for Images using Reinforced Adversarial Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.01118.pdf"}, {"id": "fa32b29e627086d4302db4d30c07a9d11dcd6b84", "title": "Weakly Supervised Facial Attribute Manipulation via Deep Adversarial Network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354123"}, {"id": "d9ee64038aea3a60120e9f7de16eb4130940a103", "title": "Message Passing Multi-Agent GANs", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d9ee/64038aea3a60120e9f7de16eb4130940a103.pdf"}, {"id": "cbdca5e0f1fd3fd745430497d372a2a30b7bb0c5", "title": "Towards Distributed Coevolutionary GANs", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08194.pdf"}, {"id": "d21ebaab3f715dc7178966ff146711882e6a6fee", "title": "Globally and locally consistent image completion", "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3073659"}, {"id": "d1697250de6f91d3a3266c1ff0fdce0bf96acfe3", "title": "A C Lassification \u2013 B Ased P Erspective on Gan D Istributions", "year": "2017", "pdf": null}, {"id": "07a1e6d26028b28185b7a3eee86752c240a24261", "title": "MODE: automated neural network model debugging via state differential analysis and input selection", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3236082"}, {"id": "84e9de36dd7915f9334db5cc1fe567e17d717495", "title": "Fine-grained categorization via CNN-based automatic extraction and integration of object-level and part-level features", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/84e9/de36dd7915f9334db5cc1fe567e17d717495.pdf"}, {"id": "b22b4817757778bdca5b792277128a7db8206d08", "title": "SCAN: Learning Hierarchical Compositional Visual Concepts", "year": "2017", "pdf": "https://arxiv.org/pdf/1707.03389.pdf"}, {"id": "3802da31c6d33d71b839e260f4022ec4fbd88e2d", "title": "Deep Attributes for One-Shot Face Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3802/da31c6d33d71b839e260f4022ec4fbd88e2d.pdf"}, {"id": "ec2027c2dd93e4ee8316cc0b3069e8abfdcc2ecf", "title": "Latent Variable PixelCNNs for Natural Image Modeling", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/252c/2e5e59d7b08887095b272d19dcd76fcf4a82.pdf"}, {"id": "cd5ef3aeebc231e2c833ef55cf0571aa990c5ff8", "title": "Image Quality Assessment Techniques Improve Training", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/cd5e/f3aeebc231e2c833ef55cf0571aa990c5ff8.pdf"}, {"id": "296502c6370cabd2b7e38e71cfc757d2e5fa2199", "title": "Detection of Deep Network Generated Images Using Disparities in Color Components", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.07276.pdf"}, {"id": "f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a", "title": "LOTS about attacking deep features", "year": "2017", "pdf": "https://arxiv.org/pdf/1611.06179.pdf"}, {"id": "4cb48924acdcc0b20ef05ea5f5e856b081d9b40f", "title": "A Classification-Based Study of Covariate Shift in GAN Distributions", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.00970.pdf"}, {"id": "6903496ee5d4c24ca5f3f18211f406e0ba8442d6", "title": "Multi-Mapping Image-to-Image Translation with Central Biasing Normalization", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.10050.pdf"}, {"id": "68b6ec13d06facacf5637f90828ab5b6e352be60", "title": "Neural Proximal Gradient Descent for Compressive Imaging", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03963.pdf"}, {"id": "9baf0509f63a3322d127ae4374aa5b0f9d5439b8", "title": "Two Birds with One Stone: Transforming and 
Generating Facial Images with Iterative GAN", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9baf/0509f63a3322d127ae4374aa5b0f9d5439b8.pdf"}, {"id": "6e911227e893d0eecb363015754824bf4366bdb7", "title": "Wasserstein Divergence for GANs", "year": "2018", "pdf": "https://arxiv.org/pdf/1712.01026.pdf"}, {"id": "a6051a9ae4e09faa02dcc45c0d34ce3b1c50382b", "title": "Face attribute prediction with convolutional neural networks", "year": 2017, "pdf": "https://doi.org/10.1109/ROBIO.2017.8324596"}, {"id": "4d90d7834ae25ee6176c096d5d6608555766c0b1", "title": "Face and Body Association for Video-Based Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115"}, {"id": "d81dbc2960e527e91c066102aabdaf9eb8b15f85", "title": "Deep Directed Generative Models with Energy-Based Probability Estimation", "year": "2016", "pdf": "https://arxiv.org/pdf/1606.03439.pdf"}, {"id": "2d6d4899c892346a9bc8902481212d7553f1bda4", "title": "Neural Face Editing with Intrinsic Image Disentangling SUPPLEMENTARY MATERIAL", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2d6d/4899c892346a9bc8902481212d7553f1bda4.pdf"}, {"id": "6b7f27cff688d5305c65fbd90ae18f3c6190f762", "title": "Generative networks as inverse problems with Scattering transforms", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06621.pdf"}, {"id": "84c35fc21db3bcd407a4ffb009912b6ac5a47e3c", "title": "Mgan: Training Generative Adversarial Nets", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/84c3/5fc21db3bcd407a4ffb009912b6ac5a47e3c.pdf"}, {"id": "b44d8ecac21867c540d9122a150c8d8c0875cbe6", "title": "Mixture Density Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00152.pdf"}, {"id": "d8526863f35b29cbf8ac2ae756eaae0d2930ffb1", "title": "Face Generation for Low-Shot Learning Using Generative Adversarial Networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265439"}, {"id": "9cc4abd2ec10e5fa94ff846c5ee27377caf17cf0", "title": "Improved Techniques for GAN based Facial Inpainting", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.08774.pdf"}, {"id": "e43a18384695ae0acc820171236a39811ec2cd58", "title": "Kin-Verification Model on FIW Dataset Using Multi-Set Learning and Local Features", "year": 2017, "pdf": null}, {"id": "0217fb2a54a4f324ddf82babc6ec6692a3f6194f", "title": "InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0217/fb2a54a4f324ddf82babc6ec6692a3f6194f.pdf"}, {"id": "7e654380bd0d1f4c00e85da71a3081d3ada432ef", "title": "Mgan: Training Generative Adversarial Nets", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7e65/4380bd0d1f4c00e85da71a3081d3ada432ef.pdf"}, {"id": "e6ca412a05002b51d358c2e3061913c3dab6b810", "title": "MoFA: Model-Based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.10580.pdf"}, {"id": "0d4d8ce029deead6f2ce7075047aa645299ddd41", "title": "Face Attributes Recognition via Deep Multi-Task Cascade", "year": 2017, "pdf": null}, {"id": "eef29a4fef85c7ed8acde9ca42f8f09d944f361d", "title": "Learning to Super-Resolve Blurry Face and Text Images", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237298"}, {"id": "b19f24ec92388513d1516d71292559417c776006", "title": "Causalgan: Learning Causal Implicit Gener-", "year": 2017, "pdf": 
"http://pdfs.semanticscholar.org/b19f/24ec92388513d1516d71292559417c776006.pdf"}, {"id": "09879f7956dddc2a9328f5c1472feeb8402bcbcf", "title": "Density estimation using Real NVP", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/252f/a4df97106d1f55a55f3d3811a69ef2539731.pdf"}, {"id": "f8d68084931f296abfb5a1c4cd971f0b0294eaa4", "title": "Unconditional Generative Models", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/f8d6/8084931f296abfb5a1c4cd971f0b0294eaa4.pdf"}, {"id": "82821e227683d66543a303f4faddc1376a91a463", "title": "Learning Multi-grid Generative ConvNets by Minimal Contrastive Divergence", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e3d7/6ee004a615acd71d4a30d265de7623303501.pdf"}, {"id": "7cca4d680152ed43e6dd8cc55d9ea55e2ed64eae", "title": "Image Restoration with Deep Generative Models", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462317"}, {"id": "72a6044a0108e0f8f1e68cd70ada46c81a416324", "title": "Improved Training of Generative Adversarial Networks Using Representative Features", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.09195.pdf"}, {"id": "c9d7219d54eccb9e49b72044d805e103fe17ba80", "title": "Towards Information-Seeking Agents", "year": "2016", "pdf": "https://arxiv.org/pdf/1612.02605.pdf"}, {"id": "8d6d0fdf4811bc9572326d12a7edbbba59d2a4cc", "title": "SchiNet: Automatic Estimation of Symptoms of Schizophrenia from Facial Behaviour Analysis", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.02531.pdf"}, {"id": "9cc3172efb42d2f9fa1b9ae7b7eef9cc349cdef9", "title": "Imbalanced Deep Learning by Minority Class Incremental Rectification", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10851.pdf"}, {"id": "0ee737085af468f264f57f052ea9b9b1f58d7222", "title": "SiGAN: Siamese Generative Adversarial Network for Identity-Preserving Face Hallucination", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08370.pdf"}, {"id": "57235f22abcd6bb928007287b17e235dbef83347", "title": "Exemplar Guided Unsupervised Image-to-Image Translation with Semantic Consistency", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11145.pdf"}, {"id": "69c8b0ec77d3164df2069a5133780a36ec8e91ad", "title": "Unsupervised 3D Reconstruction from a Single Image via Adversarial Learning", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/69c8/b0ec77d3164df2069a5133780a36ec8e91ad.pdf"}, {"id": "0e31e5e899e2c22d5871054f954f6dd01a33b9d0", "title": "Unsupervised Transformation Network Based on GANs for Target-Domain Oriented Image Translation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8492399"}, {"id": "7fe7fe517119e39eab2ab7cc5f03103d9d1c03ee", "title": "Research of image deblurring based on the deep neural network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8405801"}, {"id": "763b60feaabceebbe9eddfbaa0378b8b454327aa", "title": "Smile detection in the wild with deep convolutional neural networks", "year": "2016", "pdf": "http://doi.org/10.1007/s00138-016-0817-z"}, {"id": "2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924", "title": "Compare and Contrast: Learning Prominent Differences in Relative Attributes", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2a6b/ba2e81d5fb3c0fd0e6b757cf50ba7bf8e924.pdf"}, {"id": "7f6cd03e3b7b63fca7170e317b3bb072ec9889e0", "title": "A Face Recognition Signature Combining Patch-based Features with Soft Facial Attributes", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7f6c/d03e3b7b63fca7170e317b3bb072ec9889e0.pdf"}, {"id": 
"9da2b79c6942852e8076cdaa4d4c93eb1ae363f1", "title": "Constraint-Based Visual Generation", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09202.pdf"}, {"id": "87a39f5002ef2de3143d1ea96ae19e002c44345b", "title": "Deep face attributes recognition using spatial transformer network", "year": 2016, "pdf": null}, {"id": "d9c0310203179d5328c4f1475fa4d68c5f0c7324", "title": "Face Analysis in the Wild", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11"}, {"id": "419fec1a76d9233dcaa8d2c98ea622d19f663261", "title": "Unsupervised learning of object frames by dense equivariant image labelling", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.02932.pdf"}, {"id": "bc17c2075d7f7bc414acc00a88ff5a464eedaebe", "title": "Solving Bilinear Inverse Problems using Deep Generative Priors", "year": "2018", "pdf": null}, {"id": "189eedfc81ee47b2b44caf8bfe816726697ba421", "title": "Facial Attributes Guided Deep Sketch-to-Photo Synthesis", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347106"}, {"id": "2727927c7493cef9785b3a06a38f5c1ce126fc23", "title": "Semi-supervised FusedGAN for Conditional Image Generation", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.05551.pdf"}, {"id": "424259e9e917c037208125ccc1a02f8276afb667", "title": "Walk and Learn: Facial Attribute Representation Learning from Egocentric Video and Contextual Data", "year": 2016, "pdf": "http://arxiv.org/pdf/1604.06433v1.pdf"}, {"id": "102a2096ba2e2947dc252445f764e7583b557680", "title": "Precomputed Real-Time Texture Synthesis with Markovian Generative Adversarial Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/102a/2096ba2e2947dc252445f764e7583b557680.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "6211ba456908d605e85d102d63b106f1acb52186", "title": "Visual Interpretability forDeepLearning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/6211/ba456908d605e85d102d63b106f1acb52186.pdf"}, {"id": "f1aa120fb720f6cfaab13aea4b8379275e6d40a2", "title": "InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/f1aa/120fb720f6cfaab13aea4b8379275e6d40a2.pdf"}, {"id": "e3d76f1920c5bf4a60129516abb4a2d8683e48ae", "title": "I Know That Person: Generative Full Body and Face De-identification of People in Images", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907"}, {"id": "c6c3cee8adacff8a63ab84dc847141315e874400", "title": "Disentangling by Factorising", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.05983.pdf"}, {"id": "f672d6352a5864caab5a5a286fbc1ce042b55c16", "title": "Stabilizing GAN Training with Multiple Random Projections", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/f672/d6352a5864caab5a5a286fbc1ce042b55c16.pdf"}, {"id": "04fd269c96f11235fbbb985bb16dacedaa3098fd", "title": "Grouping-By-ID: Guarding Against Adversarial Domain Shifts", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ef50/cf14e7756f90b59fca40170dc67c88591c5a.pdf"}, {"id": "fe030b87e3c985c9dedab130949e2868e3e5e7d5", "title": "Explaining Neural Networks Semantically", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fe03/0b87e3c985c9dedab130949e2868e3e5e7d5.pdf"}, {"id": "76cb2ecc96f02b1d8a7a0d1681fbb55367a4b765", "title": "Learning Object States from Videos", "year": 2017, "pdf": 
"http://pdfs.semanticscholar.org/76cb/2ecc96f02b1d8a7a0d1681fbb55367a4b765.pdf"}, {"id": "42f8ef9d5ebf969a7e2b4d1eef4b332db562e5d4", "title": "Which Training Methods for GANs do actually Converge?", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.04406.pdf"}, {"id": "8ea9093542075bd8cc4928a4c671a95f363c61ef", "title": "Sliced-Wasserstein Autoencoder : An Embarrassingly Simple Generative Model", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/8ea9/093542075bd8cc4928a4c671a95f363c61ef.pdf"}, {"id": "2d2102d3fe127444e203a2ab11c2b3d5f56874cc", "title": "Wasserstein Auto-Encoders", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a1e7/0251ec6f6dbb87cf2db78c77270acf3dff63.pdf"}, {"id": "c9b90cf9cdd901bd3072d6dfd8ddc523c55944b1", "title": "Adversarial Generator-Encoder Networks", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c9b9/0cf9cdd901bd3072d6dfd8ddc523c55944b1.pdf"}, {"id": "0ed91520390ebdee13a0ac13d028f65d959bdc10", "title": "Hard Example Mining with Auxiliary Embeddings", "year": "", "pdf": "https://pdfs.semanticscholar.org/0ed9/1520390ebdee13a0ac13d028f65d959bdc10.pdf"}, {"id": "16f48e8b7f1f6c03c888e3f4664ce3fa1261296b", "title": "Steganographic Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/16f4/8e8b7f1f6c03c888e3f4664ce3fa1261296b.pdf"}, {"id": "fbbb0c1ac9b26047348fa1acfcc1e4b47fcd94c5", "title": "A Noise Robust Face Hallucination Framework Via Cascaded Model of Deep Convolutional Networks and Manifold Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8486563"}, {"id": "0c0db39cac8cb76b52cfdbe10bde1c53d68d202f", "title": "Metric-based Generative Adversarial Network", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3123266.3123334"}, {"id": "c68c391be18920ea1c065b714692dd968bf5a15d", "title": "Fusing Object Context to Detect Functional Area for Cognitive Robots", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8460590"}, {"id": "37381718559f767fc496cc34ceb98ff18bc7d3e1", "title": "Harnessing Synthesized Abstraction Images to Improve Facial Attribute Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3738/1718559f767fc496cc34ceb98ff18bc7d3e1.pdf"}, {"id": "4563cbfbdba1779fc598081071ae40be021cb81d", "title": "Adversarial Attacks on Variational Autoencoders", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.04646.pdf"}, {"id": "08809165154c9c557d368cddfa3ae66ccaceaed9", "title": "Taming VAEs", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.00597.pdf"}, {"id": "a4ee9f089ab9a48a6517a6967281247339a51747", "title": "Resembled Generative Adversarial Networks: Two Domains with Similar Attributes", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.00947.pdf"}, {"id": "fdd33dd6c6463564c4756fdecbfc81be82834f73", "title": "Laplacian Pyramid of Conditional Variational Autoencoders", "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3150172"}, {"id": "9ce4541d21ee3511bf3dc55bc3cd01222194d95a", "title": "Face inpainting based on high-level facial attributes", "year": 2017, "pdf": "https://doi.org/10.1016/j.cviu.2017.05.008"}, {"id": "04221205249bdffd0f155ac68ac477613654aa42", "title": "Semantic facial scores and compact deep transferred descriptors for scalable face image retrieval", "year": "2018", "pdf": "http://doi.org/10.1016/j.neucom.2018.04.056"}, {"id": "35800a537017803dd08274710388734db66b54f0", "title": "Sliced Wasserstein Generative Models", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.02631.pdf"}, {"id": 
"ea94d834f912f092618d030f080de8395fe39b3f", "title": "Joint autoencoders : a flexible meta-learning framework", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3027/8a460e2936596b671d20599f2598c4d284ed.pdf"}, {"id": "e0162dea3746d58083dd1d061fb276015d875b2e", "title": "Unconstrained Face Alignment Without Face Detection", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014992"}, {"id": "9a989c7032051566d3ade03e5650ea6a41a5a9ed", "title": "Building an automatic sprite generator with deep convolutional generative adversarial networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8080426"}, {"id": "0ae192e146431a52d7bb51923e9bdd7292ab12ef", "title": "Multi-Generator Generative Adversarial Nets", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f0c5/7fcbc2138c686ded342030bef5476441a430.pdf"}, {"id": "f8796b8e8246ce41efb2904c053fe0ea2868e373", "title": "A Variational U-Net for Conditional Appearance and Shape Generation", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.04694.pdf"}, {"id": "d7fe2a52d0ad915b78330340a8111e0b5a66513a", "title": "Photo-to-Caricature Translation on Faces in the Wild", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10735.pdf"}, {"id": "e260847323b48a79bd88dd95a1499cd3053d3645", "title": "Reconstructing perceived faces from brain activations with deep adversarial neural decoding", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/937b/78204a7ac4aac6a8782f29c68322621dc1c2.pdf"}, {"id": "b7c4fe5c89df51ebd1f89a34c66b94cc6019d8e6", "title": "Model Cards for Model Reporting", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.03993.pdf"}, {"id": "f29a24ee71940aa46b2c3438d4ddb89b33acdbc4", "title": "Towards High-Resolution Face Pose Synthesis", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8488993"}, {"id": "4e97b53926d997f451139f74ec1601bbef125599", "title": "Discriminative Regularization for Generative Models", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4e97/b53926d997f451139f74ec1601bbef125599.pdf"}, {"id": "21011f38e721e74c3979ec6f3426aec811423640", "title": "Identity-preserving Conditional Generative Adversarial Network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489282"}, {"id": "b362b812ececef21100d7a702447fcf5ab6d4715", "title": "Understanding and Improving Interpolation in Autoencoders via an Adversarial Regularizer", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07543.pdf"}, {"id": "dd8084b2878ca95d8f14bae73e1072922f0cc5da", "title": "Model Distillation with Knowledge Transfer in Face Classification, Alignment and Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.02929.pdf"}, {"id": "a562180056cc4906d6d5ef9d2b4ed098d8512317", "title": "Dropout-GAN: Learning from a Dynamic Ensemble of Discriminators", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11346.pdf"}, {"id": "70e14e216b12bed2211c4df66ef5f0bdeaffe774", "title": "Attribute-Enhanced Face Recognition with Neural Tensor Fusion Networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237666"}, {"id": "d84263e22c7535cb1a2a72c88780d5a407bd9673", "title": "Stability of Scattering Decoder For Nonlinear Diffractive Imaging", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08015.pdf"}, {"id": "0dcdef6b8d97483f4d4dab461e1cb5b3c4d1fe1a", "title": "Probabilistic Semantic Inpainting with Pixel Constrained CNNs", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.03728.pdf"}, {"id": 
"56bc524d7cc1ff2fad8f27c0414cac437fc2b4f0", "title": "Protest Activity Detection and Perceived Violence Estimation from Social Media Images", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.06204.pdf"}, {"id": "15aa6c457678e25f6bc0e818e5fc39e42dd8e533", "title": "Conditional Image Generation for Learning the Structure of Visual Objects", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.07823.pdf"}, {"id": "07a8a4b8f207b2db2a19e519027f70cd1c276294", "title": "Pixel Recursive Super Resolution", "year": 2017, "pdf": "https://arxiv.org/pdf/1702.00783v2.pdf"}, {"id": "35f3c4012e802332faf0a1426e9acf8365601551", "title": "Bidirectional Conditional Generative Adversarial Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.07461.pdf"}, {"id": "751e11880b54536a89bfcc4fd904b0989345a601", "title": "Hierarchical Adversarially Learned Inference", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/751e/11880b54536a89bfcc4fd904b0989345a601.pdf"}, {"id": "9817e0d11701e9ce0e31a32338ff3ff0969621ed", "title": "Dppnet: Approximating Determinantal Point Processes with Deep Networks", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/9817/e0d11701e9ce0e31a32338ff3ff0969621ed.pdf"}, {"id": "a7e5a46e47dd21cc9347b913dd3dde2f0ad832ed", "title": "On denoising autoencoders trained to minimise binary cross-entropy", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a7e5/a46e47dd21cc9347b913dd3dde2f0ad832ed.pdf"}, {"id": "621ea1f1e364262348135c803557e7b3454a804e", "title": "Generative spatiotemporal modeling of neutrophil behavior", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.00393.pdf"}, {"id": "6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81", "title": "Structured Output SVM Prediction of Apparent Age, Gender and Smile from Deep Features", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.96"}, {"id": "d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576", "title": "Neural Face Editing with Intrinsic Image Disentangling", "year": "2017", "pdf": "https://arxiv.org/pdf/1704.04131.pdf"}, {"id": "f792f75f6d2bf265569d4e63dd139c4d04ec7fdb", "title": "Introspective Neural Networks for Generative Modeling", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237564"}, {"id": "56f5005c4be6f816f6f43795cc4825d798cd53ef", "title": "GANs Trained by a Two Time-Scale Update Rule Converge to a Nash Equilibrium", "year": "2017", "pdf": null}, {"id": "b8658fc3b17e75afce025bcbb161dd02e7004b1f", "title": "Deep Mesh Projectors for Inverse Problems", "year": "2018", "pdf": null}, {"id": "09137e3c267a3414314d1e7e4b0e3a4cae801f45", "title": "Two Birds with One Stone: Transforming and Generating Facial Images with Iterative GAN", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.06078.pdf"}, {"id": "c8dcb7b3c5ed43e61b90b50fedc76568d8e30675", "title": "Guarding against Adversarial Domain Shifts", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c8dc/b7b3c5ed43e61b90b50fedc76568d8e30675.pdf"}, {"id": "e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5", "title": "Improving Facial Landmark Detection via a Super-Resolution Inception Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf"}, {"id": "a157ebc849d57ccff00a52a68b24e4ac8eba9536", "title": "The Contextual Loss for Image Transformation with Non-aligned Data", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.02077.pdf"}, {"id": "22f656d0f8426c84a33a267977f511f127bfd7f3", "title": "From Facial Expression Recognition to Interpersonal Relation Prediction", "year": 2017, 
"pdf": "http://arxiv.org/abs/1609.06426"}, {"id": "c05ae45c262b270df1e99a32efa35036aae8d950", "title": "Predicting Facial Attributes in Video Using Temporal Coherence and Motion-Attention", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354120"}, {"id": "5da53a17165fcc64e8fb6e9ca532bfb6d95ff622", "title": "RSCM: Region Selection and Concurrency Model for Multi-Class Weather Recognition", "year": 2017, "pdf": "http://www.cse.cuhk.edu.hk/~leojia/papers/rscm_pami17.pdf"}, {"id": "82088af865626e2340db12b2e42f3a258053d593", "title": "Learning Generative ConvNets via Multi-grid Modeling and Sampling", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.08868.pdf"}, {"id": "3352426a67eabe3516812cb66a77aeb8b4df4d1b", "title": "Joint Multi-view Face Alignment in the Wild", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06023.pdf"}, {"id": "2a8aedea2031128868f1c6dd44329c5bb7afc419", "title": "A Convex Duality Framework for GANs", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11740.pdf"}, {"id": "878301453e3d5cb1a1f7828002ea00f59cbeab06", "title": "Faceness-Net: Face Detection through Deep Facial Part Responses", "year": "2018", "pdf": "https://arxiv.org/pdf/1701.08393.pdf"}, {"id": "2c94682176f320f406f78c484f9135f085d1c0f0", "title": "Geometric Enclosing Networks", "year": "2017", "pdf": null}, {"id": "8818dafda0cf230731ac2f962d8591c89a9fac09", "title": "xGEMs: Generating Examplars to Explain Black-Box Models", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08867.pdf"}, {"id": "c14fe5e69383fa9dfbd256965df06a99fae5558d", "title": "Multi-view Adversarially Learned Inference for Cross-domain Joint Distribution Matching", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3219957"}, {"id": "ec3eb92b9a56b1fa84b127b8acc980555cd1f2e0", "title": "Channel-Recurrent Variational Autoencoders", "year": "2017", "pdf": null}, {"id": "a4cd3fc63ddc8468d3f684f32cb0578e41fed226", "title": "Generative Adversarial Style Transfer Networks for Face Aging", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ea7d/ff897a6618a5ae9c7fed19899ac0d3a4a04e.pdf"}, {"id": "6baaa8b763cc5553715766e7fbe7abb235fae33c", "title": "Facial Attributes Classification Using Multi-task Representation Learning", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789589"}, {"id": "6a9e240e5d84b33c0835cd85c96c70ad5ffdc49c", "title": "Photographic image synthesis with improved U-net", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8377492"}, {"id": "d2860bb05f747e4628e95e4d84018263831bab0d", "title": "Learning to Generate Samples from Noise through Infusion Training", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.06975.pdf"}, {"id": "73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c", "title": "Unsupervised Learning of Object Landmarks by Factorized Spatial Embeddings", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.02193.pdf"}, {"id": "0ad318510969560e2fca3d7b257e6b6f7a541b3e", "title": "High-Resolution Deep Convolutional Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f700/fb90806160810967d1f7c485f723b8861b2d.pdf"}, {"id": "6bb95a0f3668cd36407c85899b71c9fe44bf9573", "title": "Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf"}, {"id": "bfffcd2818a1679ac7494af63f864652d87ef8fa", "title": "Neural Importance Sampling", "year": "2018", "pdf": 
"https://arxiv.org/pdf/1808.03856.pdf"}, {"id": "f67afec4226aba674e786698b39b85b124945ddd", "title": "Spatial Variational Auto-Encoding via Matrix-Variate Normal Distributions", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f67a/fec4226aba674e786698b39b85b124945ddd.pdf"}, {"id": "7b07a87ff71b85f3493d1944034a960917b8482f", "title": "Alternating BackPropagation for Generator Network", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/7b07/a87ff71b85f3493d1944034a960917b8482f.pdf"}, {"id": "02b0bf28f34c3c403abecd2fb4fb7d4969c0e0db", "title": "Learning Disentangled Joint Continuous and Discrete Representations", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.00104.pdf"}, {"id": "63db76fc3ab23beb921be682d70eb021cb6c4f16", "title": "How Polarized Have We Become? A Multimodal Classification of Trump Followers and Clinton Followers", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/63db/76fc3ab23beb921be682d70eb021cb6c4f16.pdf"}, {"id": "442ee5a3f51ca93a642c20fa69326f3b17367565", "title": "Detection of rail surface defects based on CNN image recognition and classification", "year": 2018, "pdf": null}, {"id": "c5b311152a4e611288a77fbb1460eb0fbb049de3", "title": "An Efficient Training Strategy for Face Detector in Specific Scenes", "year": 2016, "pdf": null}, {"id": "273785b386eaf01be96e217a2a8aa1c2ee694c2e", "title": "ReRAM-based accelerator for deep learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342118"}, {"id": "4eaaefc53fd61d27b9ce310c188fe76003a341bd", "title": "Assessing Generative Models via Precision and Recall", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00035.pdf"}, {"id": "28e1982d20b6eff33989abbef3e9e74400dbf508", "title": "Automated kinship verification and identification through human facial images: a survey", "year": "2015", "pdf": "http://doi.org/10.1007/s11042-015-3007-5"}, {"id": "edcb662834aae8878a209c769ed664f8bd48b751", "title": "Imagining the Unimaginable Faces by Deconvolutional Networks", "year": 2018, "pdf": null}, {"id": "d5c4e3c101041556e00b25c0dcb09716827ed5b3", "title": "Unsupervised Image-to-Image Translation with Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d5c4/e3c101041556e00b25c0dcb09716827ed5b3.pdf"}, {"id": "330b3db69f70f01afd674a2b7bce4bb5000bf164", "title": "Learning the Base Distribution in Implicit Generative Models", "year": "2018", "pdf": null}, {"id": "6066e13aea80f64b6ad1415cfc3839c1f8590c04", "title": "Grouping-By-ID : Guarding Against Adversarial Domain Shifts", "year": "2017", "pdf": null}, {"id": "40638a7a9e0a0499af46053c6efc05ce0b088a28", "title": "Which Training Methods for GANs do actually Converge?", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/4063/8a7a9e0a0499af46053c6efc05ce0b088a28.pdf"}, {"id": "0ad4a814b30e096ad0e027e458981f812c835aa0", "title": "Leveraging mid-level deep representations for predicting face attributes in the wild", "year": 2016, "pdf": "http://arxiv.org/pdf/1602.01827v1.pdf"}, {"id": "b1c80444ecf42c303dbf65e47bea999af7a172bf", "title": "Exploring Generative Perspective of Convolutional Neural Networks by Learning Random Field Models", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b1c8/0444ecf42c303dbf65e47bea999af7a172bf.pdf"}, {"id": "e0515dc0157a89de48e1120662afdd7fe606b544", "title": "Perception Science in the Age of Deep Neural Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e051/5dc0157a89de48e1120662afdd7fe606b544.pdf"}, {"id": 
"d98a36081a434451184fa4becb59bf5ec55f3a1e", "title": "Computational face reader based on facial attribute estimation", "year": 2017, "pdf": "https://doi.org/10.1016/j.neucom.2016.09.110"}, {"id": "2f88d3189723669f957d83ad542ac5c2341c37a5", "title": "Attribute-correlated local regions for deep relative attributes learning", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2f88/d3189723669f957d83ad542ac5c2341c37a5.pdf"}, {"id": "8f772d9ce324b2ef5857d6e0b2a420bc93961196", "title": "Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01760.pdf"}, {"id": "e23ed8642a719ff1ab08799257d9566ed3bba403", "title": "Unsupervised Visual Attribute Transfer with Reconfigurable Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e23e/d8642a719ff1ab08799257d9566ed3bba403.pdf"}, {"id": "2e10560579f2bdeae0143141f26bd9f0a195b4b7", "title": "Mixed Precision Training", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2e10/560579f2bdeae0143141f26bd9f0a195b4b7.pdf"}, {"id": "c570f70459af243af9ca73709646239d82d07655", "title": "Cumulative attributes for pain intensity estimation", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3136755.3136789"}, {"id": "57fd8bafa4526b9a56fe43fac22dd62b2ab94563", "title": "Beyond Shared Hierarchies: Deep Multitask Learning through Soft Layer Ordering", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/57fd/8bafa4526b9a56fe43fac22dd62b2ab94563.pdf"}, {"id": "96fc93175169b788acd98f0a676dffab00651cbc", "title": "On Matching Faces with Alterations due to Plastic Surgery and Disguise", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/96fc/93175169b788acd98f0a676dffab00651cbc.pdf"}, {"id": "b7ccfc78cb54525f9cba996b73c780068a05527e", "title": "Task-Aware Compressed Sensing With Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.01284.pdf"}, {"id": "725597072c76dad5caa92b7baa6e1c761addc300", "title": "Deep adversarial neural decoding", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.07109.pdf"}, {"id": "471908e99d6965f0f6d249c9cd013485dc2b21df", "title": "Many Paths to Equilibrium: GANs Do Not Need to Decrease a Divergence At Every Step", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4719/08e99d6965f0f6d249c9cd013485dc2b21df.pdf"}, {"id": "bc995457cf5f4b2b5ef62106856571588d7d70f2", "title": "Comparison of Maximum Likelihood and GAN-based training of Real NVPs", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/bc99/5457cf5f4b2b5ef62106856571588d7d70f2.pdf"}, {"id": "5d7070067a75f57c841d0d30b23e21101da606b2", "title": "Generative Modeling using the Sliced Wasserstein Distance", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11188.pdf"}, {"id": "3e4bd583795875c6550026fc02fb111daee763b4", "title": "Convolutional Sketch Inversion", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3e4b/d583795875c6550026fc02fb111daee763b4.pdf"}, {"id": "ba788365d70fa6c907b71a01d846532ba3110e31", "title": "Robust Conditional Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.08657.pdf"}, {"id": "62b90583723174220b26c92bd67f6c422ad75570", "title": "Dna-gan: Learning Disentangled Represen-", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/62b9/0583723174220b26c92bd67f6c422ad75570.pdf"}, {"id": "1dd3faf5488751c9de10977528ab96be24616138", "title": "Detecting Anomalous Faces with 'No Peeking' Autoencoders", "year": 2018, "pdf": 
"http://pdfs.semanticscholar.org/1dd3/faf5488751c9de10977528ab96be24616138.pdf"}, {"id": "b1ffd13e8f68401a603eea9806bc37e396a3c77d", "title": "Face Generation with Conditional Generative Adversarial Networks", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b1ff/d13e8f68401a603eea9806bc37e396a3c77d.pdf"}, {"id": "3d38022d7ba71e865ca406d28acd3fe547024319", "title": "Unsupervised Local Facial Attributes Transfer Using Dual Discriminative Adversarial Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8486562"}, {"id": "e68ef9597613cd2b6cf76e81c13eb061ee468485", "title": "Latent Convolutional Models", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.06284.pdf"}, {"id": "cc2bb4318191a04e3fc82c008c649f5b90151e4d", "title": "Beyond Shared Hierarchies: Deep Multitask Learning through Soft Layer Ordering", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00108.pdf"}, {"id": "cfcb4d0d9ba7eb86f068c4fe0f9e6676a37481bc", "title": "Max-Boost-GAN: Max Operation to Boost Generative Ability of Generative Adversarial Networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265349"}, {"id": "edd6b6cd62d4c3b5d288721510e579be62c941d6", "title": "Conditional image generation using feature-matching GAN", "year": 2017, "pdf": "https://doi.org/10.1109/CISP-BMEI.2017.8302049"}, {"id": "a022eff5470c3446aca683eae9c18319fd2406d5", "title": "Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description s\u00e9mantique des traits visuels humains)", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf"}, {"id": "9941a408ae031d1254bbc0fe7a63fac5f85fe347", "title": "Neural Processes", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.01622.pdf"}, {"id": "de60ee528db713d264ffea870b33f8be054fb8c7", "title": "A Classification-Based Perspective on GAN Distributions", "year": "2017", "pdf": null}, {"id": "a91fd02ed2231ead51078e3e1f055d8be7828d02", "title": "The Robust Manifold Defense: Adversarial Training using Generative Models", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a91f/d02ed2231ead51078e3e1f055d8be7828d02.pdf"}, {"id": "57932806423204b445bac4abeaede97edb90fa03", "title": "Edge-guided generative adversarial network for image inpainting", "year": 2017, "pdf": "https://doi.org/10.1109/VCIP.2017.8305138"}, {"id": "c231d8638e8b5292c479d20f7dd387c53e581a1a", "title": "Multi-View Data Generation Without View Supervision", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00305.pdf"}, {"id": "2baea24cc71793ba40cf738b7ad1914f0e549863", "title": "Attribute Augmented Convolutional Neural Network for Face Hallucination", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2bae/a24cc71793ba40cf738b7ad1914f0e549863.pdf"}, {"id": "e1740c8a562901ac1b94c78b33c4416500cedebc", "title": "Joint-VAE: Learning Disentangled Joint Continuous and Discrete Representations", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e174/0c8a562901ac1b94c78b33c4416500cedebc.pdf"}, {"id": "a8760dc83e8cb88f241cc206855fbbad680889a0", "title": "Bregman learning for generative adversarial networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8407110"}, {"id": "4efb08fcd652c60764b6fd278cee132b71c612a1", "title": "Pixel Deconvolutional Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.06820.pdf"}, {"id": "db0d33590dc15de2d30cf0407b7a26ae79cd51b5", "title": "Deep Probabilistic Modeling of Natural Images using a 
Pyramid Decomposition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/db0d/33590dc15de2d30cf0407b7a26ae79cd51b5.pdf"}, {"id": "021e008282714eaefc0796303f521c9e4f199d7e", "title": "NCC-Net: Normalized Cross Correlation Based Deep Matcher with Robustness to Illumination Variations", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354319"}, {"id": "eb72dcf0ba423d0e12d63cd7881f2ac5dfda7984", "title": "Associative Compression Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/eb72/dcf0ba423d0e12d63cd7881f2ac5dfda7984.pdf"}, {"id": "a54d63c1a8c4db3c5034b1fdb08526459bb3c0b1", "title": "Multi-Gait Recognition Based on Attribute Discovery", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7976333"}, {"id": "69adf2f122ff18848ff85e8de3ee3b2bc495838e", "title": "Arbitrary Facial Attribute Editing: Only Change What You Want", "year": "2017", "pdf": null}, {"id": "06560d5721ecc487a4d70905a485e22c9542a522", "title": "Deep Facial Attribute Detection in the Wild: From General to Specific", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/0656/0d5721ecc487a4d70905a485e22c9542a522.pdf"}, {"id": "e21c45b14d75545d40ed07896f26ec6f766f6a4b", "title": "Fisher GAN", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.09675.pdf"}, {"id": "06be17bcc4136476855fc594759dddc6f8b6150f", "title": "MMGAN: Manifold Matching Generative Adversarial Network for Generating Images", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f8c9/31bff2521a8447b3483b46c07b1b4f888f52.pdf"}, {"id": "6b327af674145a34597986ec60f2a49cff7ed155", "title": "Defense-gan: Protecting Classifiers against Adversarial Attacks Using Generative Models", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ffc0/e60668ce70b9b68d15f84a49bf0988be632d.pdf"}, {"id": "6f900e683ea1fc85825a403d1ba2df7875f35bb9", "title": "Joint-VAE: Learning Disentangled Joint Continuous and Discrete Representations", "year": "2018", "pdf": null}, {"id": "8929e704b6af7f09ad027714b75972cb9df57483", "title": "Image Inpainting for Irregular Holes Using Partial Convolutions", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.07723.pdf"}, {"id": "5a9103153c7f36d8a28bfd66e89ff05c93129415", "title": "Multi-task convolutional neural network for car attribute recognition", "year": 2017, "pdf": null}, {"id": "33658ee91ae67f3c92542dd0f0838b48c994ae4d", "title": "Robust Head Detection in Collaborative Learning Environments Using AM-FM Representations", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8470355"}, {"id": "c03ef6e94808185c1080ac9b155ac3b159b4f1ec", "title": "Learning to Avoid Errors in GANs by Manipulating Input Spaces", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c03e/f6e94808185c1080ac9b155ac3b159b4f1ec.pdf"}, {"id": "14a022a3eb8cc9681b1ab075650d462788de1fa0", "title": "GANs for Biological Image Synthesis", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.04692.pdf"}, {"id": "f0483ebab9da2ba4ae6549b681cf31aef2bb6562", "title": "3c-gan: an Condition-context-composite Generative Adversarial Networks for Gen-", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f048/3ebab9da2ba4ae6549b681cf31aef2bb6562.pdf"}, {"id": "91edca64a666c46b0cbca18c3e4938e557eeb21a", "title": "Guiding InfoGAN with Semi-supervision", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/91ed/ca64a666c46b0cbca18c3e4938e557eeb21a.pdf"}, {"id": "23fd82c04852b74d655015ff0876e6c5defc6e61", "title": "Deep-based Ingredient Recognition for Cooking Recipe Retrieval", 
"year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2964315"}, {"id": "b04d4b1e8b510180726f49a66dbaaf23c9ef64a0", "title": "Introspective Generative Modeling: Decide Discriminatively", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b04d/4b1e8b510180726f49a66dbaaf23c9ef64a0.pdf"}, {"id": "e8c6853135856515fc88fff7c55737a292b0a15b", "title": "BoxFlow: Unsupervised Face Detector Adaptation from Images to Videos", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.46"}, {"id": "e0082ae9e466f7c855fb2c2300215ced08f61432", "title": "Generative Temporal Models with Spatial Memory for Partially Observed Environments", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09401.pdf"}, {"id": "ae2b2493f35cecf1673eb3913fdce37e037b53a2", "title": "Optimal Transport Maps for Distribution Pre- Serving Operations on Latent Spaces of Gener-", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ae2b/2493f35cecf1673eb3913fdce37e037b53a2.pdf"}, {"id": "3d0b2da6169d38b56c58fe5f13342cf965992ece", "title": "Spatio-temporal representation for face authentication by using multi-task learning with human attributes", "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532909"}, {"id": "b64cc1f0772e9620ecf916019de85b7adb357b7a", "title": "Fast Face-Swap Using Convolutional Neural Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1611.09577.pdf"}, {"id": "fac36fa1b809b71756c259f2c5db20add0cb0da0", "title": "Transferring GANs: Generating Images from Limited Data", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01677.pdf"}, {"id": "31af1f2614823504d1d643d1b019c6f9d2150b15", "title": "Super-FAN: Integrated facial landmark localization and super-resolution of real-world low resolution faces in arbitrary poses with GANs", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.02765.pdf"}, {"id": "ddefb92908e6174cf48136ae139efbb4bd198896", "title": "Feature-wise Bias Amplification", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ddef/b92908e6174cf48136ae139efbb4bd198896.pdf"}, {"id": "708f4787bec9d7563f4bb8b33834de445147133b", "title": "Wavelet-SRNet: A Wavelet-Based CNN for Multi-scale Face Super Resolution", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237449"}, {"id": "d979dbc55f73304a5d839079c070062e0b3ddbc5", "title": "Deep Learning Markov Random Field for Semantic Segmentation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d979/dbc55f73304a5d839079c070062e0b3ddbc5.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "c3293ef751d3fb041bd3016fbc3fa5cc16f962fa", "title": "Inferencing Based on Unsupervised Learning of Disentangled Representations", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/c329/3ef751d3fb041bd3016fbc3fa5cc16f962fa.pdf"}, {"id": "61f04606528ecf4a42b49e8ac2add2e9f92c0def", "title": "Deep Deformation Network for Object Landmark Localization", "year": "2016", "pdf": "https://arxiv.org/pdf/1605.01014.pdf"}, {"id": "7c2174a02f355a00f1fd5aac6dd62c84a919a2d1", "title": "Normal Residual Blocks Albedo Residual Blocks Light Estimator SH light Normal Conv . Albedo Conv . Conv . 
Normal Albedo Shading Image Recon", "year": "2017", "pdf": null}, {"id": "9d8978ee319d671283a90761aaed150c7cc9154b", "title": "Fader Networks: Manipulating Images by Sliding Attributes", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.00409.pdf"}, {"id": "801a80f7a18fccb2e8068996a73aee2cf04ae460", "title": "Optimal transport maps for distribution preserving operations on latent spaces of Generative Models", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.01970.pdf"}, {"id": "f553f8022b1417bc7420523220924b04e3f27b8e", "title": "Finding your Lookalike: Measuring Face Similarity Rather than Face Identity", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05252.pdf"}, {"id": "b768cb6fc2616f3dbe9ef4e25dedd7d95781ba66", "title": "Distribution Matching in Variational Inference", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.06847.pdf"}, {"id": "e530b5dbced106b72ecd0d1ef542d2c9eaf00856", "title": "ReGAN: A pipelined ReRAM-based accelerator for generative adversarial networks", "year": 2018, "pdf": "http://dl.acm.org/citation.cfm?id=3201645"}, {"id": "8bddd0afd064e2d45ab6cf9510f2631f7438c17b", "title": "Outlier Detection using Generative Models with Theoretical Performance Guarantees", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11335.pdf"}, {"id": "101569eeef2cecc576578bd6500f1c2dcc0274e2", "title": "Multiaccuracy: Black-Box Post-Processing for Fairness in Classification", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.12317.pdf"}, {"id": "56fd4c05869e11e4935d48aa1d7abb96072ac242", "title": "OpenFace 2.0: Facial Behavior Analysis Toolkit", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812"}, {"id": "e309632d479b8f59e615d0f3c4bc69938361d187", "title": "Deep Learning for Imbalance Data Classification using Class Expert Generative Adversarial Network", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.04585.pdf"}, {"id": "db1a9b8d8ce9a5696a96f8db4206b6f72707730e", "title": "Cross-Modal Facial Attribute Recognition with Geometric Features", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961838"}, {"id": "d5cf6a02f8308e948e3bcd1fd1ca660ea8ea8921", "title": "G Enerative Networks as Inverse Problems with Scattering Transforms", "year": "", "pdf": "http://pdfs.semanticscholar.org/d5cf/6a02f8308e948e3bcd1fd1ca660ea8ea8921.pdf"}, {"id": "e9afb44fa1bf048e90d68f755945bc2b81642239", "title": "Data-Driven Geometric Face Image Smilization Featuring Moving Least Square Based Deformation", "year": 2017, "pdf": "https://doi.org/10.1109/BigMM.2017.86"}, {"id": "9e0285debd4b0ba7769b389181bd3e0fd7a02af6", "title": "From Face Images and Attributes to Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/9e02/85debd4b0ba7769b389181bd3e0fd7a02af6.pdf"}, {"id": "6b2db002cbc5312e4796de4d4b14573df2c01648", "title": "Learning Hierarchical Features from Deep Generative Models", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6b2d/b002cbc5312e4796de4d4b14573df2c01648.pdf"}, {"id": "389b2390fd310c9070e72563181547cf23dceea3", "title": "\u0392-vae: Learning Basic Visual Concepts with a Constrained Variational Framework", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/389b/2390fd310c9070e72563181547cf23dceea3.pdf"}, {"id": "af6cae71f24ea8f457e581bfe1240d5fa63faaf7", "title": "Multi-Task Zipping via Layer-wise Neuron Sharing", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.09791.pdf"}, {"id": "788a3faa14ca191d7f187b812047190a70798428", "title": "Interpretable Set Functions", "year": "2018", "pdf": 
"https://arxiv.org/pdf/1806.00050.pdf"}, {"id": "cb2470aade8e5630dcad5e479ab220db94ecbf91", "title": "Exploring Facial Differences in European Countries Boundary by Fine-Tuned Neural Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018"}, {"id": "1667b7c68e733f95f81c12e6cac73e5f659abca1", "title": "Distributionally Robust Games: Wasserstein Metric", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489636"}, {"id": "d59404354f84ad98fa809fd1295608bf3d658bdc", "title": "Face Synthesis from Visual Attributes via Sketch using Conditional VAEs and GANs", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d594/04354f84ad98fa809fd1295608bf3d658bdc.pdf"}, {"id": "d3d887aebeeae44cefd5c2bdbb388d9ce109e335", "title": "Image Manipulation with Perceptual Discriminators", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.01396.pdf"}, {"id": "28121cd9150250fe51de62521065c7e2246a73e9", "title": "Blind Image Deconvolution using Deep Generative Priors", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/bc17/c2075d7f7bc414acc00a88ff5a464eedaebe.pdf"}, {"id": "92e5708ed3b622ca1f0f6ac28ffd6e789c528cdf", "title": "Adversarial Inversion : Inverse Graphics with Adversarial Priors", "year": "2017", "pdf": null}, {"id": "147b7998526ebbdf64b1662503b378d9f6456ccd", "title": "Generative Adversarial Networks for Image Steganography", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/147b/7998526ebbdf64b1662503b378d9f6456ccd.pdf"}, {"id": "7910d3a86e03f4c41fbbe8029fab115547be151b", "title": "Taming Adversarial Domain Transfer with Structural Constraints for Image Enhancement", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.00598.pdf"}, {"id": "372bc106c61e7eb004835e85bbfee997409f176a", "title": "Coupled Generative Adversarial Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4248/12cef92a364e03222488ba246e8c3b1f06a3.pdf"}, {"id": "1cce875bf085602a2b0e486eb37dadc47e4efbb4", "title": "An optimized skin texture model using gray-level co-occurrence matrix", "year": 2017, "pdf": null}, {"id": "e3582dffe5f3466cc5bc9d736934306c551ab33c", "title": "AttGAN: Facial Attribute Editing by Only Changing What You Want", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10678.pdf"}, {"id": "346578304ff943b97b3efb1171ecd902cb4f6081", "title": "Generative Multi-Adversarial Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/36f7/a8e1eaa21c6de5de0c80d4a08f6459fd3677.pdf"}, {"id": "7abac083402d44a96769f9e68c8f6ad84ba80472", "title": "Beyond Eleven Color Names for Image Understanding", "year": "2017", "pdf": "http://doi.org/10.1007/s00138-017-0902-y"}, {"id": "231af7dc01a166cac3b5b01ca05778238f796e41", "title": "GANs Trained by a Two Time-Scale Update Rule Converge to a Nash Equilibrium", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/231a/f7dc01a166cac3b5b01ca05778238f796e41.pdf"}, {"id": "73d57e2c855c39b4ff06f2d7394ab4ea35f597d4", "title": "First Order Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.04591.pdf"}, {"id": "654ad3b6f7c6de7184a9e8eec724e56274f27e3f", "title": "Alternating Back-Propagation for Generator Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/654a/d3b6f7c6de7184a9e8eec724e56274f27e3f.pdf"}, {"id": "3355aff37b5e4ba40fc689119fb48d403be288be", "title": "Deep Private-Feature Extraction", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.03151.pdf"}, {"id": "45824905119ec09447d60e1809434062d5f4c1e4", "title": "Detecting Smiles of Young Children 
via Deep Transfer Learning", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265406"}, {"id": "62007c30f148334fb4d8975f80afe76e5aef8c7f", "title": "Eye In-Painting with Exemplar Generative Adversarial Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.03999.pdf"}, {"id": "be4c2b6fdde83179dd601541f57ee5d14fe1e98a", "title": "Graphical Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.03429.pdf"}, {"id": "a45450824c6e8e6b42fd9bbf52871104b6c6ce8b", "title": "Optimizing the Latent Space of Generative Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.05776.pdf"}, {"id": "6789bddbabf234f31df992a3356b36a47451efc7", "title": "Unsupervised Generation of Free-Form and Parameterized Avatars.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/6789/bddbabf234f31df992a3356b36a47451efc7.pdf"}, {"id": "d5909f8d82bff4b86cc36ecd000f251c1a76293b", "title": "High-fidelity facial reflectance and geometry inference from an unconstrained image", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3201364"}, {"id": "614a7c42aae8946c7ad4c36b53290860f6256441", "title": "Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks", "year": 2016, "pdf": "https://arxiv.org/pdf/1604.02878.pdf"}, {"id": "169565b280eb25a9cbcc1528420551371ffed314", "title": "Neural Photo Editing with Introspective Adversarial Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/1e10/ea03353ed5aedf93c671f5272791929be4ac.pdf"}, {"id": "0bb574ad77f55f395450b4a9f863ecfdd4880bcd", "title": "Learning the Base Distribution in Implicit Generative Models", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0bb5/74ad77f55f395450b4a9f863ecfdd4880bcd.pdf"}, {"id": "c4f3375dab1886f37f542d998e61d8c30a927682", "title": "Beyond Shared Hierarchies: Deep Multitask Learning through Soft Layer Ordering", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c4f3/375dab1886f37f542d998e61d8c30a927682.pdf"}, {"id": "70f3d3d9a7402a0f62a5646a16583c6c58e3b07a", "title": "An Architecture for Deep, Hierarchical Generative Models", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c2a0/8137657ac2312ae70991e74aa73ce93051ef.pdf"}, {"id": "99f565df31ef710a2d8a1b606e3b7f5f92ab657c", "title": "Geometry Score: A Method For Comparing Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.02664.pdf"}, {"id": "39d08fa8b028217384daeb3e622848451809a422", "title": "Variational Approaches for Auto-Encoding Generative Adversarial Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04987.pdf"}, {"id": "710011644006c18291ad512456b7580095d628a2", "title": "Learning Residual Images for Face Attribute Manipulation", "year": "2017", "pdf": "https://arxiv.org/pdf/1612.05363.pdf"}, {"id": "2d42b5915ca18fdc5fa3542bad48981c65f0452b", "title": "Generalization and Equilibrium in Generative Adversarial Nets (GANs)", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.00573.pdf"}, {"id": "bcbbb240450a511841ee8510f8dd274e6c788f2b", "title": "Unregularized Auto-Encoder with Generative Adversarial Networks for Image Generation", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240569"}, {"id": "dfcb4773543ee6fbc7d5319b646e0d6168ffa116", "title": "Adversarial Variational Bayes: Unifying Variational Autoencoders and Generative Adversarial Networks", "year": "2017", "pdf": "https://arxiv.org/pdf/1701.04722.pdf"}, {"id": "f580b0e1020ad67bdbb11e8d99a59c21a8df1e7d", "title": "Compressed Sensing using Generative 
Models", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.03208.pdf"}, {"id": "43a2c871450ba4d8888e8692aa98cb10e861ea71", "title": "Learning Generative ConvNet with Continuous Latent Factors by Alternating Back-Propagation", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d07c/f9bf54add8651f5cc8493cca50ef030ff9ba.pdf"}, {"id": "1a8a2539cffba25ed9a7f2b869ebb737276ccee1", "title": "Pros and Cons of GAN Evaluation Measures", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.03446.pdf"}, {"id": "4a111ca4ba39386b489f9c0a9c7949e932563ddb", "title": "Automatic Group Affect Analysis in Images via Visual Attribute and Feature Networks", "year": "2018", "pdf": "http://doi.org/10.1109/ICIP.2018.8451242"}, {"id": "f8cfabecbe587c611de2696a37f96e3f77ac8555", "title": "NEMGAN: Noise Engineered Mode-matching GAN", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.03692.pdf"}, {"id": "305346d01298edeb5c6dc8b55679e8f60ba97efb", "title": "Fine-Grained Face Annotation Using Deep Multi-Task CNN", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3053/46d01298edeb5c6dc8b55679e8f60ba97efb.pdf"}, {"id": "7689d23a22682c92bdf9a1df975fa2cdd24f1b87", "title": "MMD with Kernel Learning In practice we use finite samples from distributions to estimate", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/7689/d23a22682c92bdf9a1df975fa2cdd24f1b87.pdf"}, {"id": "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "title": "Deep Learning for Biometrics : A Survey KALAIVANI SUNDARARAJAN", "year": "2018", "pdf": null}, {"id": "e58f08ad6e0edd567f217ef08de1701a8c29fcc8", "title": "Pseudo-task Augmentation: From Deep Multitask Learning to Intratask Sharing - and Back", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.04062.pdf"}, {"id": "380dd0ddd5d69adc52defc095570d1c22952f5cc", "title": "Improving Smiling Detection with Race and Gender Diversity", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf"}, {"id": "80688e72b00013eabe57ce88be0c204d0b5aea2c", "title": "Semantic Face Signatures: Recognizing and Retrieving Faces by Verbal Descriptions", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078274"}, {"id": "03889b0e8063532ae56d36dd9c54c3784a69e4d4", "title": "Learning to Play Guess Who? and Inventing a Grounded Language as a Consequence", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c947/bdba6f8a112fe9a3584a7e50a6225d73ce68.pdf"}, {"id": "59b6ff409ae6f57525faff4b369af85c37a8dd80", "title": "Deep Attribute Driven Image Similarity Learning Using Limited Data", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISM.2017.28"}, {"id": "a0a950f513b4fd58cee54bccc49b852943ffd02c", "title": "Image Inpainting using Block-wise Procedural Training with Annealed Adversarial Counterpart", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.08943.pdf"}, {"id": "2780f8fc25320f4fb258442ceb790ffe301730fe", "title": "Generative Reversible Networks", "year": "2018", "pdf": null}, {"id": "c3a3f7758bccbead7c9713cb8517889ea6d04687", "title": "Funnel-structured cascade for multi-view face detection with alignment-awareness", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c3a3/f7758bccbead7c9713cb8517889ea6d04687.pdf"}, {"id": "55cad1f4943018459b761f89afd9292d347610f2", "title": "Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.02859.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/cofw.json b/site/datasets/unknown/cofw.json new file mode 100644 index 00000000..285e3923 --- /dev/null +++ b/site/datasets/unknown/cofw.json @@ -0,0 +1 @@ +{"id": "2724ba85ec4a66de18da33925e537f3902f21249", "paper": {"paper_id": "2724ba85ec4a66de18da33925e537f3902f21249", "key": "cofw", "title": "Robust Face Landmark Estimation under Occlusion", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298", "address": "", "name": "COFW"}, "citations": [{"id": "b48d3694a8342b6efc18c9c9124c62406e6bf3b3", "title": "Recurrent Convolutional Shape Regression", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8305545"}, {"id": "1c1f957d85b59d23163583c421755869f248ceef", "title": "Robust Facial Landmark Detection Under Significant Head Poses and Occlusion", "year": 2015, "pdf": "http://homepages.rpi.edu/~wuy9/ICCV15/FLD_iccv15.pdf"}, {"id": "2d072cd43de8d17ce3198fae4469c498f97c6277", "title": "Random Cascaded-Regression Copse for Robust Facial Landmark Detection", "year": 2015, "pdf": "http://www.patrikhuber.ch/files/RCRC_SPL_2015.pdf"}, {"id": "1efaa128378f988965841eb3f49d1319a102dc36", "title": "Hierarchical binary CNNs for landmark localization with limited resources", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04803.pdf"}, {"id": "768f6a14a7903099729872e0db231ea814eb05e9", "title": "De-Mark GAN: Removing Dense Watermark with Generative Adversarial Network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411205"}, {"id": "9ab963e473829739475b9e47514f454ab467a5af", "title": "A Fully End-to-End Cascaded CNN for Facial Landmark Detection", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.33"}, {"id": "7d73adcee255469aadc5e926066f71c93f51a1a5", "title": "Face alignment by deep convolutional network with adaptive learning rate", "year": 2016, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001283.pdf"}, {"id": "c7c8d150ece08b12e3abdb6224000c07a6ce7d47", "title": "DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1611.05271.pdf"}, {"id": "a0fd85b3400c7b3e11122f44dc5870ae2de9009a", "title": "Learning Deep Representation for Face Alignment with Auxiliary Attributes", "year": "2016", "pdf": "https://arxiv.org/pdf/1408.3967.pdf"}, {"id": "84a20d0a47c0d826b77f73075530d618ba7573d2", "title": "Look at Boundary: A Boundary-Aware Face Alignment Algorithm", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10483.pdf"}, {"id": "15aa6c457678e25f6bc0e818e5fc39e42dd8e533", "title": "Conditional Image Generation for Learning the Structure of Visual Objects", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.07823.pdf"}, {"id": "c872d6310f2079db0cee0e69cc96da1470055225", "title": "Heterogeneous Multi-task Learning on Non-overlapping Datasets for Facial Landmark Detection", "year": 2016, "pdf": "https://doi.org/10.1007/978-3-319-46675-0_68"}, {"id": "3176ee88d1bb137d0b561ee63edf10876f805cf0", "title": "Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation", "year": "2016", "pdf": "https://arxiv.org/pdf/1511.07356.pdf"}, {"id": "bd13f50b8997d0733169ceba39b6eb1bda3eb1aa", "title": "Occlusion Coherence: Detecting and Localizing Occluded Faces", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bd13/f50b8997d0733169ceba39b6eb1bda3eb1aa.pdf"}, {"id": "e465f596d73f3d2523dbf8334d29eb93a35f6da0", "title": "On Face 
Segmentation, Face Swapping, and Face Perception", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e465/f596d73f3d2523dbf8334d29eb93a35f6da0.pdf"}, {"id": "6932baa348943507d992aba75402cfe8545a1a9b", "title": "Stacked Hourglass Network for Robust Facial Landmark Localisation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014987"}, {"id": "3352426a67eabe3516812cb66a77aeb8b4df4d1b", "title": "Joint Multi-view Face Alignment in the Wild", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06023.pdf"}, {"id": "d42dbc995318e2936714c65c028700bfd3633049", "title": "Face fiducial detection by consensus of exemplars", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477592"}, {"id": "2e3d081c8f0e10f138314c4d2c11064a981c1327", "title": "A Comprehensive Performance Evaluation of Deformable Face Tracking \u201cIn-the-Wild\u201d", "year": 2017, "pdf": "http://arxiv.org/pdf/1603.06015v1.pdf"}, {"id": "61f04606528ecf4a42b49e8ac2add2e9f92c0def", "title": "Deep Deformation Network for Object Landmark Localization", "year": "2016", "pdf": "https://arxiv.org/pdf/1605.01014.pdf"}, {"id": "45e7ddd5248977ba8ec61be111db912a4387d62f", "title": "Adversarial Learning of Structure-Aware Fully Convolutional Networks for Landmark Localization", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00253.pdf"}, {"id": "064b797aa1da2000640e437cacb97256444dee82", "title": "Coarse-to-fine Face Alignment with Multi-Scale Local Patch Regression", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/064b/797aa1da2000640e437cacb97256444dee82.pdf"}, {"id": "397085122a5cade71ef6c19f657c609f0a4f7473", "title": "Using Segmentation to Predict the Absence of Occluded Parts", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/db11/4901d09a07ab66bffa6986bc81303e133ae1.pdf"}, {"id": "1fef53b07c6c625545fc071c7386d41f87925675", "title": "A rule-based methodology and assessment for context-aware privacy", "year": 2014, "pdf": null}, {"id": "ef4b5bcaad4c36d7baa7bc166bd1712634c7ad71", "title": "Towards Spatio-temporal Face Alignment in Unconstrained Conditions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef4b/5bcaad4c36d7baa7bc166bd1712634c7ad71.pdf"}, {"id": "b908edadad58c604a1e4b431f69ac8ded350589a", "title": "Deep Face Feature for Face Alignment", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.02721.pdf"}, {"id": "4b6387e608afa83ac8d855de2c9b0ae3b86f31cc", "title": "Face Sketch Landmarks Localization in the Wild", "year": 2014, "pdf": "https://doi.org/10.1109/LSP.2014.2333544"}, {"id": "5da98f7590c08e83889f3cec7b0304b3610abf42", "title": "Face alignment using a deep neural network with local feature learning and recurrent regression", "year": 2017, "pdf": "https://doi.org/10.1016/j.eswa.2017.07.018"}, {"id": "25695abfe51209798f3b68fb42cfad7a96356f1f", "title": "An Investigation into Combining Both Facial Detection and Landmark Localisation into a Unified Procedure Using Gpu Computing", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2569/5abfe51209798f3b68fb42cfad7a96356f1f.pdf"}, {"id": "5239001571bc64de3e61be0be8985860f08d7e7e", "title": "Deep Appearance Models: A Deep Boltzmann Machine Approach for Face Modeling", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5239/001571bc64de3e61be0be8985860f08d7e7e.pdf"}, {"id": "3c086601ce0bac61047b5b931b253bd4035e1e7a", "title": "Occlusion handling in feature point tracking using ranked parts based models", "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7350897"}, {"id": 
"e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5", "title": "Improving Facial Landmark Detection via a Super-Resolution Inception Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf"}, {"id": "3d62b2f9cef997fc37099305dabff356d39ed477", "title": "Joint Face Alignment and 3D Face Reconstruction with Application to Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3d62/b2f9cef997fc37099305dabff356d39ed477.pdf"}, {"id": "0561bed18b6278434deae562d646e8adad72e75d", "title": "Low rank driven robust facial landmark regression", "year": 2015, "pdf": "https://doi.org/10.1016/j.neucom.2014.09.052"}, {"id": "f423d8be5e13d9ef979debd3baf0a1b2e1d3682f", "title": "Approaching human level facial landmark localization by deep learning", "year": 2016, "pdf": "https://doi.org/10.1016/j.imavis.2015.11.004"}, {"id": "5aafca76dbbbbaefd82f5f0265776afb5320dafe", "title": "Empirical analysis of cascade deformable models for multi-view face detection", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5aaf/ca76dbbbbaefd82f5f0265776afb5320dafe.pdf"}, {"id": "9ef2b2db11ed117521424c275c3ce1b5c696b9b3", "title": "Robust Face Alignment Using a Mixture of Invariant Experts", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c31b/dd00734807938dcfd8a12375bd9ffa556985.pdf"}, {"id": "26ac607a101492bc86fd81a141311066cfe9e2b5", "title": "Sieving Regression Forest Votes for Facial Feature Detection in the Wild", "year": 2013, "pdf": "http://www.eecs.qmul.ac.uk/~hy300/papers/YangPatrasiccv2013.pdf"}, {"id": "0eb45876359473156c0d4309f548da63470d30ee", "title": "A Deeply-Initialized Coarse-to-fine Ensemble of Regression Trees for Face Alignment", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/0eb4/5876359473156c0d4309f548da63470d30ee.pdf"}, {"id": "9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf", "title": "A lighting robust fitting approach of 3D morphable model for face reconstruction", "year": "2015", "pdf": "http://doi.org/10.1007/s00371-015-1158-z"}, {"id": "d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e", "title": "Recognizing and Segmenting Objects in the Presence of Occlusion and Clutter", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d850/aff9d10a01ad5f1d8a1b489fbb3998d0d80e.pdf"}, {"id": "2fda461869f84a9298a0e93ef280f79b9fb76f94", "title": "OpenFace: An open source facial behavior analysis toolkit", "year": 2016, "pdf": "https://www.cl.cam.ac.uk/research/rainbow/projects/openface/wacv2016.pdf"}, {"id": "747dc0add50b86f5ba9e3e7315943d520e08f9eb", "title": "Robust Facial Landmark Localization Using LBP Histogram Correlation Based Initialization", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.78"}, {"id": "cc96eab1e55e771e417b758119ce5d7ef1722b43", "title": "An Empirical Study of Recent Face Alignment Methods", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cc96/eab1e55e771e417b758119ce5d7ef1722b43.pdf"}, {"id": "8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed", "title": "Landmark detection in 2D bioimages for geometric morphometrics: a multi-resolution tree-based approach", "year": "2017", "pdf": "http://doi.org/10.1038/s41598-017-18993-5"}, {"id": "ec7cd3fff8bdbbe7005bc8d6b7f6b87d72aac2d9", "title": "A semantic occlusion model for human pose estimation from a single depth image", "year": 2015, "pdf": "http://www.mmp.rwth-aachen.de/publications/pdf/rafi_chalearn2015.pdf"}, {"id": "cad2bd940e7580490da9cc739e597d029e166504", "title": "Salient-points-guided face alignment", "year": 2017, "pdf": null}, {"id": 
"940a675de8a48b54bac6b420f551529d2bc53b99", "title": "Advances , Challenges , and Opportunities in Automatic Facial Expression Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/940a/675de8a48b54bac6b420f551529d2bc53b99.pdf"}, {"id": "2df4d05119fe3fbf1f8112b3ad901c33728b498a", "title": "Multi-task Learning for Structured Output Prediction", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf"}, {"id": "3504907a2e3c81d78e9dfe71c93ac145b1318f9c", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": 2017, "pdf": "https://arxiv.org/pdf/1605.02686v3.pdf"}, {"id": "24e099e77ae7bae3df2bebdc0ee4e00acca71250", "title": "Robust Face Alignment Under Occlusion via Regional Predictive Power Estimation", "year": 2015, "pdf": "https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22467/Yang%20Robust%20Face%20Alignment%20Under%20Occlusion%20via%20Regional%20Predictive%20Power%20Estimation%202015%20Accepted.pdf?sequence=1"}, {"id": "31e57fa83ac60c03d884774d2b515813493977b9", "title": "Face alignment with cascaded semi-parametric deep greedy neural forests", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/31e5/7fa83ac60c03d884774d2b515813493977b9.pdf"}, {"id": "c38b1fa00f1f370c029984c55d4d2d40b529d00c", "title": "Neural Information Processing", "year": "2015", "pdf": "http://doi.org/10.1007/978-3-319-26561-2"}, {"id": "4c19690889fb3a12ec03e65bae6f5f20420b4ba4", "title": "Robust facial landmark detection using mixture of discriminative visibility-aware models", "year": 2016, "pdf": "https://doi.org/10.1049/iet-ipr.2015.0699"}, {"id": "24c442ac3f6802296d71b1a1914b5d44e48b4f29", "title": "Pose and Expression-Coherent Face Recovery in the Wild", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.117"}, {"id": "f7dea4454c2de0b96ab5cf95008ce7144292e52a", "title": "Facial Landmark Detection: A Literature Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.05563.pdf"}, {"id": "3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e", "title": "Learning Local Responses of Facial Landmarks with Conditional Variational Auto-Encoder for Face Alignment", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.117"}, {"id": "427cf6528901d7fd5e5acfe4175ef809ed8ee7fa", "title": "SDM-Based Means of Gradient for Eye Center Localization", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8511989"}, {"id": "a83fc450c124b7e640adc762e95e3bb6b423b310", "title": "Deep Face Feature for Face Alignment and Reconstruction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b908/edadad58c604a1e4b431f69ac8ded350589a.pdf"}, {"id": "0c75c7c54eec85e962b1720755381cdca3f57dfb", "title": "Face Landmark Fitting via Optimized Part Mixtures and Cascaded Deformable Model", "year": 2016, "pdf": "https://webpages.uncc.edu/~szhang16/paper/PAMI_face_landmark.pdf"}, {"id": "8f772d9ce324b2ef5857d6e0b2a420bc93961196", "title": "Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01760.pdf"}, {"id": "3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2", "title": "Disentangling 3 D Pose in A Dendritic CNN for Unconstrained 2 D Face Alignment", "year": "2018", "pdf": null}, {"id": "fc1e37fb16006b62848def92a51434fc74a2431a", "title": "A Comprehensive Analysis of Deep Regression", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/fc1e/37fb16006b62848def92a51434fc74a2431a.pdf"}, 
{"id": "2fb8d7601fc3ad637781127620104aaab5122acd", "title": "Estimating Correspondences of Deformable Objects “In-the-Wild”", "year": 2016, "pdf": null}, {"id": "5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4", "title": "Pose-indexed based multi-view method for face alignment", "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532567"}, {"id": "60824ee635777b4ee30fcc2485ef1e103b8e7af9", "title": "Cascaded Collaborative Regression for Robust Facial Landmark Detection Trained Using a Mixture of Synthetic and Real Images With Dynamic Weighting", "year": 2015, "pdf": "http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/Feng-TIP-2015.pdf"}, {"id": "102b27922e9bd56667303f986404f0e1243b68ab", "title": "Multiscale recurrent regression networks for face alignment", "year": 2017, "pdf": "https://applied-informatics-j.springeropen.com/track/pdf/10.1186/s40535-017-0042-5?site=applied-informatics-j.springeropen.com"}, {"id": "74156a11c2997517061df5629be78428e1f09cbd", "title": "Preparatory coordination of head, eyes and hands: Experimental study at intersections", "year": 2016, "pdf": "http://cvrr.ucsd.edu/publications/2016/MartinRangeshTrivediICPR2016.pdf"}, {"id": "9e8637a5419fec97f162153569ec4fc53579c21e", "title": "Segmentation and Normalization of Human Ears Using Cascaded Pose Regression", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/9e86/37a5419fec97f162153569ec4fc53579c21e.pdf"}, {"id": "598744c8620e4ecbf449d14d7081fbf1cd05851f", "title": "Dense 3D face alignment from 2D video for real-time use", "year": "2017", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29731533"}, {"id": "c5adb33bd3557c94d0e54cfe2036a1859118a65e", "title": "Fast and accurate vehicle detection by aspect ratio regression", "year": 2017, "pdf": null}, {"id": "909c23143162d98ffb2447f0018f92ac6cf8591b", "title": "Frame-based face emotion recognition using linear discriminant analysis", "year": 2017, "pdf": null}, {"id": "76b11c281ac47fe6d95e124673a408ee9eb568e3", "title": "Real-time Multi View Face Detection and Pose Estimation Aishwarya", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/76b1/1c281ac47fe6d95e124673a408ee9eb568e3.pdf"}, {"id": "1885acea0d24e7b953485f78ec57b2f04e946eaf", "title": "Combining Local and Global Features for 3D Face Tracking", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.297"}, {"id": "cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3", "title": "Facial Landmark Detection via Progressive Initialization", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406479"}, {"id": "18dfc2434a95f149a6cbb583cca69a98c9de9887", "title": "Hough Networks for Head Pose Estimation and Facial Feature Localization", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/18df/c2434a95f149a6cbb583cca69a98c9de9887.pdf"}, {"id": "6ba6045e4b404c44f9b4dfce2d946019f0e85a72", "title": "Facial landmark detection based on an ensemble of local weighted regressors during real driving situation", "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899962"}, {"id": "13f6ab2f245b4a871720b95045c41a4204626814", "title": "Cortex commands the performance of skilled movement", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9d74/382b6c4209c49de7c2b0fab7b34483ba0ddb.pdf"}, {"id": "78f2c8671d1a79c08c80ac857e89315197418472", "title": "Recurrent 3D-2D Dual Learning for Large-Pose Facial Landmark Detection", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237443"}, {"id": "a85e9e11db5665c89b057a124547377d3e1c27ef", "title": "Dynamics 
of Driver's Gaze: Explorations in Behavior Modeling and Maneuver Prediction", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.00066.pdf"}, {"id": "0bab5213911c19c40e936b08d2f8fba01e286b85", "title": "Cascaded Pose Regression Revisited: Face Alignment in Videos", "year": 2017, "pdf": "https://doi.org/10.1109/BigMM.2017.81"}, {"id": "d4453ec649dbde752e74da8ab0984c6f15cc6e06", "title": "An augmented image gradients based supervised regression technique for iris center localization", "year": "2016", "pdf": "http://doi.org/10.1007/s11042-016-3361-y"}, {"id": "d03265ea9200a993af857b473c6bf12a095ca178", "title": "Multiple deep convolutional neural networks averaging for face alignment", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d032/65ea9200a993af857b473c6bf12a095ca178.pdf"}, {"id": "df80fed59ffdf751a20af317f265848fe6bfb9c9", "title": "Learning Deep Sharable and Structural Detectors for Face Alignment", "year": 2017, "pdf": "http://ivg.au.tsinghua.edu.cn/paper/2017_Learning%20deep%20sharable%20and%20structural%20detectors%20for%20face%20alignment.pdf"}, {"id": "891b10c4b3b92ca30c9b93170ec9abd71f6099c4", "title": "2 New Statement for Structured Output Regression Problems", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf"}, {"id": "390f3d7cdf1ce127ecca65afa2e24c563e9db93b", "title": "Learning Deep Representation for Face Alignment with Auxiliary Attributes", "year": 2016, "pdf": "https://arxiv.org/pdf/1408.3967v2.pdf"}, {"id": "88ad82e6f2264f75f7783232ba9185a2f931a5d1", "title": "Facial Expression Analysis under Partial Occlusion: A Survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/88ad/82e6f2264f75f7783232ba9185a2f931a5d1.pdf"}, {"id": "a81da7746f4f58e7211e65f11e6520144f8c003d", "title": "Facial Landmark Localization in the Wild by Backbone-Branches Representation Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8499059"}, {"id": "355746e6e1770cfcc2e91479f8134c854a77ff96", "title": "Human mental states recognition under face occlusion", "year": 2017, "pdf": null}, {"id": "91883dabc11245e393786d85941fb99a6248c1fb", "title": "Face alignment in-the-wild: A Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9188/3dabc11245e393786d85941fb99a6248c1fb.pdf"}, {"id": "527dda77a3864d88b35e017d542cb612f275a4ec", "title": "Facial 3D model registration under occlusions with sensiblepoints-based reinforced hypothesis refinement", "year": 2017, "pdf": "https://arxiv.org/pdf/1709.00531v1.pdf"}, {"id": "f0ae807627f81acb63eb5837c75a1e895a92c376", "title": "Facial Landmark Detection using Ensemble of Cascaded Regressions", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf"}, {"id": "50ccc98d9ce06160cdf92aaf470b8f4edbd8b899", "title": "Towards robust cascaded regression for face alignment in the wild", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Qu_Towards_Robust_Cascaded_2015_CVPR_paper.pdf"}, {"id": "c18d537037caf399c4fabfdec896c376675af58a", "title": "Human-Eye Tracking and Location Algorithm Based on AdaBoost-STC and RF", "year": 2017, "pdf": null}, {"id": "56fd4c05869e11e4935d48aa1d7abb96072ac242", "title": "OpenFace 2.0: Facial Behavior Analysis Toolkit", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812"}, {"id": "fce1c3f4948cf300694c18c3fcc5486cd060af13", "title": "Head Pose Estimation on Low-Quality Images", "year": "2018", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373880"}, {"id": "43ed518e466ff13118385f4e5d039ae4d1c000fb", "title": "Classification of Occluded Objects Using Fast Recurrent Processing", "year": 2015, "pdf": "http://arxiv.org/abs/1505.01350"}, {"id": "faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b", "title": "Combining Data-driven and Model-driven Methods for Robust Facial Landmark Detection", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/faea/d8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "86c053c162c08bc3fe093cc10398b9e64367a100", "title": "Cascade of forests for face alignment", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/86c0/53c162c08bc3fe093cc10398b9e64367a100.pdf"}, {"id": "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1029-3"}, {"id": "fbe4f8a6af19f63e47801c6f31402f9baae5fecf", "title": "Real-time expression-sensitive HMD face reconstruction", "year": "2015", "pdf": "http://dl.acm.org/citation.cfm?id=2820910"}, {"id": "6d8c9a1759e7204eacb4eeb06567ad0ef4229f93", "title": "Face Alignment Robust to Pose, Expressions and Occlusions", "year": "2016", "pdf": "https://arxiv.org/pdf/1707.05938.pdf"}, {"id": "113b06e70b7eead8ae7450bafe9c91656705024c", "title": "Face Alignment across Large Pose via MT-CNN Based 3D Shape Reconstruction", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373832"}, {"id": "dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43", "title": "Factorized Variational Autoencoders for Modeling Audience Reactions to Movies", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100120"}, {"id": "83e093a07efcf795db5e3aa3576531d61557dd0d", "title": "Facial Landmark Localization Using Robust Relationship Priors and Approximative Gibbs Sampling", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/83e0/93a07efcf795db5e3aa3576531d61557dd0d.pdf"}, {"id": "766728bac030b169fcbc2fbafe24c6e22a58ef3c", "title": "A survey of deep facial landmark detection", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/7667/28bac030b169fcbc2fbafe24c6e22a58ef3c.pdf"}, {"id": "66490b5869822b31d32af7108eaff193fbdb37b0", "title": "Cascade Multi-View Hourglass Model for Robust 3D Face Alignment", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373857"}, {"id": "2f489bd9bfb61a7d7165a2f05c03377a00072477", "title": "Structured Semi-supervised Forest for Facial Landmarks Localization with Face Mask Reasoning", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2f48/9bd9bfb61a7d7165a2f05c03377a00072477.pdf"}, {"id": "6a26893ed63830d00f6d011679d1b1ed2d8466a9", "title": "PNN-SIFT: An enhanced face recognition and classification system in image processing", "year": 2017, "pdf": null}, {"id": "df3d85ecf8e326774cab59aab75b572fcf9767cc", "title": "The novel part-based cascaded regression algorithm research combining with pose estimation", "year": "2018", "pdf": "http://doi.org/10.1007/s00371-018-1610-y"}, {"id": "5b5b9c6c67855ede21a60c834aea5379df7d51b7", "title": "Advances in compositional fitting of active appearance models", "year": "2016", "pdf": "http://hdl.handle.net/10044/1/45280"}, {"id": "af11769a427eb8daa8435b1ea3252531b4275db8", "title": "A Hybrid 
Approach for Face Alignment 1", "year": "2017", "pdf": null}, {"id": "12d8730da5aab242795bdff17b30b6e0bac82998", "title": "Persistent Evidence of Local Image Properties in Generic ConvNets", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/12d8/730da5aab242795bdff17b30b6e0bac82998.pdf"}, {"id": "73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c", "title": "Unsupervised Learning of Object Landmarks by Factorized Spatial Embeddings", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.02193.pdf"}, {"id": "06262d14323f9e499b7c6e2a3dec76ad9877ba04", "title": "Real-Time Pose Estimation Piggybacked on Object Detection", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Juranek_Real-Time_Pose_Estimation_ICCV_2015_paper.pdf"}, {"id": "9391618c09a51f72a1c30b2e890f4fac1f595ebd", "title": "Globally Tuned Cascade Pose Regression via Back Propagation with Application in 2D Face Pose Estimation and Heart Segmentation in 3D CT Images", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9391/618c09a51f72a1c30b2e890f4fac1f595ebd.pdf"}, {"id": "f61829274cfe64b94361e54351f01a0376cd1253", "title": "Regressing a 3D Face Shape from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410784"}, {"id": "15bf0e70b069cea62d87d3bf706172c4a6a7779e", "title": "Robust face recognition and classification system based on SIFT and DCP techniques in image processing", "year": 2017, "pdf": null}, {"id": "05ea7930ae26165e7e51ff11b91c7aa8d7722002", "title": "Learning And-Or Model to Represent Context and Occlusion for Car Detection and Viewpoint Estimation", "year": 2016, "pdf": "http://www.stat.ucla.edu/~sczhu/papers/PAMI_car_occlusion_AOG.pdf"}, {"id": "0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b", "title": "Convolutional Point-set Representation: A Convolutional Bridge Between a Densely Annotated Image and 3D Face Alignment", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/84b7/e2138a3701432c33ea70a1297328cd814ab5.pdf"}, {"id": "4f742c09ce12859b20deaa372c8f1575acfc99c9", "title": "How do you smile? 
Towards a comprehensive smile analysis system", "year": 2017, "pdf": "https://doi.org/10.1016/j.neucom.2017.01.020"}, {"id": "29c5a44e01d1126505471b2ab46163d598c871c7", "title": "Improving Landmark Localization with Semi-Supervised Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01591.pdf"}, {"id": "902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb", "title": "Shape-appearance-correlated active appearance model", "year": 2016, "pdf": "https://doi.org/10.1016/j.patcog.2016.03.002"}, {"id": "4da735d2ed0deeb0cae4a9d4394449275e316df2", "title": "The rhythms of head, eyes and hands at intersections", "year": 2016, "pdf": "http://cvrr.ucsd.edu/publications/2016/0406.pdf"}, {"id": "c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee", "title": "Robust Facial Landmark Localization Based on Texture and Pose Correlated Initialization", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.05612.pdf"}, {"id": "5a10d74c7fc3294f76d771df413fe0b0b35f2ab5", "title": "Face Alignment Based on Incremental Learning for Bayonet Surveillance", "year": 2016, "pdf": null}, {"id": "419fec1a76d9233dcaa8d2c98ea622d19f663261", "title": "Unsupervised learning of object frames by dense equivariant image labelling", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.02932.pdf"}, {"id": "b6bbaa26f19ced1ce357d5bce903d772d5a49102", "title": "Privileged Information-Based Conditional Structured Output Regression Forest for Facial Point Detection", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7005459"}, {"id": "37278ffce3a0fe2c2bbf6232e805dd3f5267eba3", "title": "Can we still avoid automatic face detection?", "year": 2016, "pdf": "http://arxiv.org/pdf/1602.04504v1.pdf"}, {"id": "614a7c42aae8946c7ad4c36b53290860f6256441", "title": "Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks", "year": 2016, "pdf": "https://arxiv.org/pdf/1604.02878.pdf"}, {"id": "1e8fd77d4717e9cb6079e10771dd2ed772098cb3", "title": "Wearable social camera: Egocentric video summarization for social interaction", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICMEW.2016.7574681"}, {"id": "2aea27352406a2066ddae5fad6f3f13afdc90be9", "title": "Bottom-Up and Top-Down Reasoning with Hierarchical Rectified Gaussians", "year": 2016, "pdf": "http://arxiv.org/pdf/1507.05699v4.pdf"}, {"id": "8d646ac6e5473398d668c1e35e3daa964d9eb0f6", "title": "Memory-Efficient Global Refinement of Decision-Tree Ensembles and its Application to Face Alignment", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8d64/6ac6e5473398d668c1e35e3daa964d9eb0f6.pdf"}, {"id": "b1a3b19700b8738b4510eecf78a35ff38406df22", "title": "Automatic Analysis of Facial Actions: A Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b1a3/b19700b8738b4510eecf78a35ff38406df22.pdf"}, {"id": "7234468db46b37e2027ab2978c67b48b8581f796", "title": "Mirrored non-maximum suppression for accurate object part localization", "year": 2015, "pdf": "https://doi.org/10.1109/ACPR.2015.7486464"}, {"id": "9ce4541d21ee3511bf3dc55bc3cd01222194d95a", "title": "Face inpainting based on high-level facial attributes", "year": 2017, "pdf": "https://doi.org/10.1016/j.cviu.2017.05.008"}, {"id": "2c14c3bb46275da5706c466f9f51f4424ffda914", "title": "L2, 1-based regression and prediction accumulation across views for robust facial landmark detection", "year": "2016", "pdf": "http://doi.org/10.1016/j.imavis.2015.09.003"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/feret.json b/site/datasets/unknown/feret.json new file mode 100644 index 00000000..719f25e6 --- /dev/null +++ b/site/datasets/unknown/feret.json @@ -0,0 +1 @@ +{"id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "paper": {"paper_id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "key": "feret", "title": "The FERET Verification Testing Protocol for Face Recognition Algorithms", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf", "address": "", "name": "FERET"}, "citations": [{"id": "5f107c92dd1c3f294b53627a5de1c7c46d996994", "title": "Complex Eye Movement Pattern Biometrics: The Effects of Environment and Stimulus", "year": 2013, "pdf": "http://cs.txstate.edu/~ok11/papers_published/2013_TIFS_Ho_Ko.pdf"}, {"id": "ff3d4f2406ca2d78b20ed94a33983bca3583d520", "title": "Aguar\u00e1: An Improved Face Recognition Algorithm through Gabor Filter Adaptation", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/ff3d/4f2406ca2d78b20ed94a33983bca3583d520.pdf"}, {"id": "e018c7f468a9b61cd6e7dcbc40b332a8a25808ae", "title": "Face Recognition by Face Bunch Graph Method", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/e018/c7f468a9b61cd6e7dcbc40b332a8a25808ae.pdf"}, {"id": "3adae788b345c778d93d51cb9a1a5c38e718b1a6", "title": "Oculomotor Plant Characteristics: The Effects of Environment and Stimulus", "year": 2016, "pdf": "https://doi.org/10.1109/TIFS.2015.2503263"}, {"id": "e163118b4a5b8016754134215433eee1f2c0065a", "title": "3-D Shape Matching for Face Analysis and Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e163/118b4a5b8016754134215433eee1f2c0065a.pdf"}, {"id": "a5f10a8c7a64c3339f7cbad9b518ba29f5f74b7b", "title": "Multi-weighted majority voting algorithm on support vector machine and its application", "year": 2009, "pdf": null}, {"id": "c51039a4cbfcdb0175f15824e186998500f5b85a", "title": "Processing of Face Images and Its Applications", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/c510/39a4cbfcdb0175f15824e186998500f5b85a.pdf"}, {"id": "a00bcd5db27ab270a11e83d3286f0c18a847d674", "title": "Computing the 3D face recognition based on pseudo 2D Hidden Markov Models using geodesic distances", "year": 2008, "pdf": null}, {"id": "4273a9d1605a69ac66440352b92ebeb230fd34f6", "title": "Simple Test Procedure for Image-Based Biometric Veri cation Systems", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/4273/a9d1605a69ac66440352b92ebeb230fd34f6.pdf"}, {"id": "c29487c5eb0cdb67d92af1bc0ecbcf825e2abec3", "title": "3-D Face Recognition With the Geodesic Polar Representation", "year": 2007, "pdf": "http://www.iti.gr/files/3d%20face%20recognition%20with%20the%20geodesic%20polar%20representation.pdf"}, {"id": "5ff6aa027b2772b3cfb45bdde22ef808ba59acf5", "title": "Shape and texture combined face recognition for detection of forged ID documents", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7522348"}, {"id": "99a17a7f9a263ad357cab88b5607ae626139cec7", "title": "Pyramidal algorithm for SVM-classification", "year": 2012, "pdf": null}, {"id": "8286c8eb0df7c9f1b2000b67066331f4e541f5bd", "title": "Targeted biometric impersonation", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547323"}, {"id": "5ce0309a124ec78a6b55de0b564ae13bff8c1dbc", "title": "The discriminant elastic graph matching algorithm applied to frontal face verification", "year": 2007, "pdf": "https://doi.org/10.1016/j.patcog.2007.01.026"}, 
{"id": "2a7e2cda27807d24b845f5b5080fb1296c302bfe", "title": "Personal Authentication Using Signature Recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/2a7e/2cda27807d24b845f5b5080fb1296c302bfe.pdf"}, {"id": "10e704c82616fb5d9c48e0e68ee86d4f83789d96", "title": "INSTITUT FU\u0308R INFORMATIK UND PRAKTISCHE MATHEMATIK Gabor Wavelet Networks for Object Representation", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/10e7/04c82616fb5d9c48e0e68ee86d4f83789d96.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "57c929b6f30eec954dc5f17a52fbce290d8e3ca9", "title": "Performance characterization in computer vision: A guide to best practices", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/57c9/29b6f30eec954dc5f17a52fbce290d8e3ca9.pdf"}, {"id": "bccef9817e38e8e9b450edc062f3938f1e25281c", "title": "Frontal face detection with evolutionary template matching", "year": 2012, "pdf": null}, {"id": "0994916f67fd15687dd5d7e414becb1cd77129ac", "title": "Multi Class Different Problem Solving Using Intelligent Algorithm", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/0994/916f67fd15687dd5d7e414becb1cd77129ac.pdf"}, {"id": "8fbe68810cbc53521395829620060cf9558231cc", "title": "Learning Discriminant Person-Specific Facial Models Using Expandable Graphs", "year": 2007, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tifs_2007_a.pdf"}, {"id": "a1b7b23bd8f2b2ef37a9113e6b8499f0069aac85", "title": "Performance assessment of face recognition using super-resolution", "year": 2010, "pdf": "http://www.nist.gov/customcf/get_pdf.cfm?pub_id=906932"}, {"id": "9ba8dadb888433f6e4b47ea2b7ec7b0c9f8eeb0c", "title": "Component-based face detection method for various types of occluded faces", "year": 2008, "pdf": null}, {"id": "6e7b2afb4daf1fe50a62faf75018ff81c24ee526", "title": "Submitted to CVPR ' 99 Discriminant Analysis based Feature ExtractionW", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/6e7b/2afb4daf1fe50a62faf75018ff81c24ee526.pdf"}, {"id": "f43687ad1aea44b841c7700a1a30d08806d2add2", "title": "Component-based robust face detection using AdaBoost and decision tree", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.33"}, {"id": "670e2c9b18ad853e86fe402bb8161bc81e38887d", "title": "A biometrical data quality analysis method to reliably evaluate the efficiency of recognition algorithms and systems", "year": 2012, "pdf": null}, {"id": "53f8f1ddd83a9e0e0821aaa883fbf7c1f7f5426e", "title": "Face Recognition using Principal Component Analysis and Log-Gabor Filters", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/5981/234739b71f7eabc7f08a538e5ab0a9e7943f.pdf"}, {"id": "51b70582fb0d536d4a235f91bf6ad382f29e2601", "title": "Detection of emotions from video in non-controlled environment. 
(D\u00e9tection des \u00e9motions \u00e0 partir de vid\u00e9os dans un environnement non contr\u00f4l\u00e9)", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/e36f/a8b870fd155f9df898bcc6613f6554eab519.pdf"}, {"id": "d7b534178ef47def4aea4850782b13ef21bf8e96", "title": "Sparse 3D directional vertices vs continuous 3D curves: Efficient 3D surface matching and its application for single model face recognition", "year": "2017", "pdf": "http://doi.org/10.1016/j.patcog.2016.12.009"}, {"id": "ae419d28ab936cbbc420dcfd1decb16a45afc8a9", "title": "Real-time face verification using multiple feature combination and a support vector machine supervisor", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/ae41/9d28ab936cbbc420dcfd1decb16a45afc8a9.pdf"}, {"id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "title": "Labeled Faces in the Wild: A Survey", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf"}, {"id": "e0c081a007435e0c64e208e9918ca727e2c1c44e", "title": "Universidad De Las Palmas", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/e0c0/81a007435e0c64e208e9918ca727e2c1c44e.pdf"}, {"id": "90ce227ec08053ea6acf9f9f9f53d8b7169574f2", "title": "An Introduction to Evaluating Biometric Systems", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/90ce/227ec08053ea6acf9f9f9f53d8b7169574f2.pdf"}, {"id": "5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e", "title": "Face Recognition Algorithms", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/5d5c/d6fa5c41eb9d3d2bab3359b3e5eb60ae194e.pdf"}, {"id": "3f54660f555c4ef356375ec8c589891478d59513", "title": "Principal Component Analysis of Multi-view Images for Viewpoint-Independent Face Recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2006.93"}, {"id": "ddbb69b0f60960a29c3f861010ec55a22ab86d61", "title": "A novel face representation using local Zernike moments", "year": 2012, "pdf": "https://doi.org/10.1109/SIU.2012.6204621"}, {"id": "bfecffd8fc8c309154b51e8d762fdc03918a2db0", "title": "Gender classification with Local Zernike Moments and local binary patterns", "year": 2014, "pdf": "https://doi.org/10.1109/SIU.2014.6830519"}, {"id": "617a6935643615f09ef2b479609baa0d5f87cd67", "title": "To Be Taken At Face Value? 
Computerised Identification", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/617a/6935643615f09ef2b479609baa0d5f87cd67.pdf"}, {"id": "dddafd9fc479b06cb1601b1280c19181bc6172a7", "title": "Gabor Saliency Map for Face Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICIAP.2007.72"}, {"id": "422fc05b3ef72e96c87b9aa4190efa7c7fb8c170", "title": "Preprocessing Technique for Face Recognition Applications under Varying Illumination Conditions", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/422f/c05b3ef72e96c87b9aa4190efa7c7fb8c170.pdf"}, {"id": "d24a30ed78b749f3730e25dcef89472dd5fb439c", "title": "Improving Face Recognition Performance Using a Hierarchical Bayesian Model", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/d24a/30ed78b749f3730e25dcef89472dd5fb439c.pdf"}, {"id": "07b682ecd645712fd1d1d1ce31c02ad548e3b05e", "title": "Quasi-convex Optimization of Metrics in Biometric Score Fusion", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.50"}, {"id": "96a7f2faf4baa09184deb458a03146805d62beed", "title": "Passive Three Dimensional Face Recognition Using Iso-Geodesic Contours and Procrustes Analysis", "year": 2013, "pdf": "https://doi.org/10.1007/s11263-013-0631-2"}, {"id": "c4c4e5ff454584ae6a68d25b36bfc860e9a893a0", "title": "Real-Time Facial Recognition System\u2014Design, Implementation and Validation", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/c4c4/e5ff454584ae6a68d25b36bfc860e9a893a0.pdf"}, {"id": "aeb4cb00f0ab02b5a082daebac959d29e254dd8b", "title": "A Region-Based Scheme Using RKLT and Predictive Classified Vector Quantization", "year": "1999", "pdf": "http://doi.org/10.1006/cviu.1999.0776"}, {"id": "f4c768a8f9ae211168acafed36d371d793e768b3", "title": "Statistical shape modelling for expression-invariant face analysis and recognition", "year": 2014, "pdf": "https://doi.org/10.1007/s10044-014-0439-x"}, {"id": "90dcf6c0b414900c606112c0feff7ff2d68bb94e", "title": "Pose and illumination invariant 2D to 3D facial recognition system", "year": 2008, "pdf": null}, {"id": "6f0b8920d39ac44eea320a4df2763137a71d851c", "title": "Face Verification using External Features", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.48"}, {"id": "cf01707bafd2850af6a46933a3029bc2a9aeecbf", "title": "Smartphone based face recognition tool for the blind", "year": 2010, "pdf": null}, {"id": "f20fe5c662fc90fb0032cdfe39812e83456ca46a", "title": "Digital Watermark for 3D Face Model Authentication and EER Performance Analysis", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISDA.2008.293"}, {"id": "73c72161969a070b3caa40d4f075ba501a1b994b", "title": "Expression-Invariant 3D Face Recognition Using Patched Geodesic Texture Transform", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.52"}, {"id": "e68083909381a8fbd0e4468aa06204ac00a0e6fc", "title": "Visual Identification by Signature Tracking", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/e680/83909381a8fbd0e4468aa06204ac00a0e6fc.pdf"}, {"id": "fec6648b4154fc7e0892c74f98898f0b51036dfe", "title": "A Generic Face Processing Framework: Technologies, Analyses and Applications", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/fec6/648b4154fc7e0892c74f98898f0b51036dfe.pdf"}, {"id": "98d7508f449fe44769c3f343778663779497ccbd", "title": "Shared Feature Extraction for Nearest Neighbor Face Recognition", "year": 2008, "pdf": "https://doi.org/10.1109/TNN.2007.911742"}, {"id": "0922e7d583d02f6078e59974a3de4452382ca9dd", 
"title": "Local approach for face verification in polar frequency domain", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/1bc7/4d02010455ee2abe0eeb0ca8bfa359abd368.pdf"}, {"id": "0a1e3d271fefd506b3a601bd1c812a9842385829", "title": "Face Recognition Using 3D Directional Corner Points", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.483"}, {"id": "e8aa9aac39456a422bd7a1a87f943af5856e9ad2", "title": "A novel scheme of face verification using active appearance models", "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2005.1577275"}, {"id": "1cd584f519d9cd730aeef1b1d87f7e2e82b4de59", "title": "A fully automatic face recognition system using a combined audio - visual approach \u2217", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/1cd5/84f519d9cd730aeef1b1d87f7e2e82b4de59.pdf"}, {"id": "6e261b9e539ecd03d76063f893d59c6eafb6ed43", "title": "On the Use of External Face Features for Identity Verification", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/6e26/1b9e539ecd03d76063f893d59c6eafb6ed43.pdf"}, {"id": "f5edbb47a9a200c17f0c34ec8cb6fc4a2f9cfc20", "title": "Method of multimodal biometric data analysis for optimal efficiency evaluation of recognition algorithms and systems", "year": 2011, "pdf": null}, {"id": "7ef0cc4f3f7566f96f168123bac1e07053a939b2", "title": "Triangular Similarity Metric Learning: a Siamese Architecture Approach. ( L'apprentissage de similarit\u00e9 triangulaire en utilisant des r\u00e9seaux siamois)", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/e735/b8212d8a81909753291d5d06789a917014f8.pdf"}, {"id": "adee5054f386c6eb8ca83417c9b9ce4571aa2633", "title": "2.5D face recognition using Patch Geodesic Moments", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/adee/5054f386c6eb8ca83417c9b9ce4571aa2633.pdf"}, {"id": "29c9af31eb125b696ce34d0a8c64382f7e97bf23", "title": "Vision based tracking and recognition of dynamic hand gestures", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/967e/4cbb879e43c0881950060254cdb798149ba9.pdf"}, {"id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf"}, {"id": "4d3a6c2cee0cf06ff6471fad3d65a5835d0552f8", "title": "3-D Face Recognition Using Geodesic-Map Representation and Statistical Shape Modelling", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/4d3a/6c2cee0cf06ff6471fad3d65a5835d0552f8.pdf"}, {"id": "db157b6a9733fc50086bd8bd197c7d995f5d13ce", "title": "Breadth-first-based decision algorithm for facial biometrics", "year": 2009, "pdf": null}, {"id": "dbd5e9691cab2c515b50dda3d0832bea6eef79f2", "title": "Image - based Face Recognition : Issues and Methods 1", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/dbd5/e9691cab2c515b50dda3d0832bea6eef79f2.pdf"}, {"id": "e8867f819f39c1838bba7d446934258035d4101c", "title": "Face recognition performance with superresolution.", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/e886/7f819f39c1838bba7d446934258035d4101c.pdf"}, {"id": "2fa4096260ae64fa704f9ff8d7a8d1552f903f43", "title": "Fully Private Noninteractive Face Verification", "year": 2013, "pdf": "https://doi.org/10.1109/TIFS.2013.2262273"}, {"id": "be427965a79aa1191a1ea9dd79717d89bcb74ad1", "title": "Face Recognition Using Separate Layers of the RGB Image", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2008.162"}, {"id": "c16bae6b2e578df2cba8e436e02bdeda281c2743", 
"title": "Tensor Discriminant Color Space for Face Recognition", "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2011.2121084"}, {"id": "b277bde51641d6b08693c171aea761beb14af800", "title": "Face Kernel Extraction from Local Features", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/b277/bde51641d6b08693c171aea761beb14af800.pdf"}, {"id": "281be1be2f0ecce173e3678a7e87419f0815e016", "title": "Studies of Plain-to-Rolled Fingerprint Matching Using the NIST Algorithmic Test Bed (ATB)", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/281b/e1be2f0ecce173e3678a7e87419f0815e016.pdf"}, {"id": "87c6ba55b0f817de4504e39dbb201842ae102c9f", "title": "Three Dimensional Face Recognition Using Iso-Geodesic and Iso-Depth Curves", "year": 2008, "pdf": "http://live.ece.utexas.edu/publications/2008/sj_btas2008.pdf"}, {"id": "db6d00f9237cce392c08b422662b48baa2ed1b80", "title": "A New Framework for Biometric Face Recognition Using Visual", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/db6d/00f9237cce392c08b422662b48baa2ed1b80.pdf"}, {"id": "2b2aee745c608c67f931ef064f1d672c0f549262", "title": "Local Zernike Moments: A new representation for face recognition", "year": 2012, "pdf": "http://sariyanidi.pythonanywhere.com/media/sariyanidi_icip12None.pdf"}, {"id": "1e5fab737794d18f4fb385a53d5ec0fc5c74f32b", "title": "Automatic Face Recognition System Based on Local Fourier-Bessel Features", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/1e5f/ab737794d18f4fb385a53d5ec0fc5c74f32b.pdf"}, {"id": "26679e1885b1ce186e80551befdf82e57b3f7455", "title": "Targeted biometric impersonation", "year": 2013, "pdf": "https://doi.org/10.1109/IWBF.2013.6547323"}, {"id": "9499b8367a84fccb3651a95e4391d6e17fd92ec5", "title": "Face Recognition Issues in a Border Control Environment", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/9499/b8367a84fccb3651a95e4391d6e17fd92ec5.pdf"}, {"id": "5327241cdbfcfa39abadb4753c7f3706bc24f94a", "title": "Face verification using local binary patterns generic histogram adaptation and Chi-square based decision", "year": 2013, "pdf": "http://ieeexplore.ieee.org/document/6623966/"}, {"id": "27c9ddb72360f4cd0f715cd7ea82fa399af91f11", "title": "Multiresolution face recognition", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/27c9/ddb72360f4cd0f715cd7ea82fa399af91f11.pdf"}, {"id": "1e8eee51fd3bf7a9570d6ee6aa9a09454254689d", "title": "Face Search at Scale", "year": 2017, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangOttoJain_FaceSearchAtScale_TPAMI.pdf"}, {"id": "513505f2fd1b929ccea1de0dd4499f239815b2cc", "title": "Biometric Authentication for Border Control Applications", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TKDE.2007.190716"}, {"id": "d341ff4e93ff4407251d00417c9a756a68b6f5be", "title": "Recognition of identical twins using fusion of various facial feature extractors", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d341/ff4e93ff4407251d00417c9a756a68b6f5be.pdf"}, {"id": "7189d5584416ef2a39d6ab16929dfecdddc10081", "title": "A Review of Face Sketch Recognition Systems", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/7189/d5584416ef2a39d6ab16929dfecdddc10081.pdf"}, {"id": "0cf7741e1fdb11a77cdf39b4dda8c65a62af4f23", "title": "Learning Prototype Hyperplanes for Face Verification in the Wild", "year": 2013, "pdf": "http://vipl.ict.ac.cn/sites/default/files/papers/files/2013_TIP_mnkan_Learning%20Prototype%20Hyperplanes%20for%20Face%20Verification%20in%20the%20Wild.pdf"}, {"id": 
"383a0c3a59311fada94ba0ebc55493730b0f49cb", "title": "Real-time embedded face recognition for smart home", "year": 2005, "pdf": "http://ai.pku.edu.cn/aiwebsite/research.files/collected%20papers%20-%20others/Real-time%20embedded%20face%20recognition%20for%20smart%20home.pdf"}, {"id": "3961ad661e7f1af75941b156ae3e0387dfcb1a49", "title": "A Two-Phase Test Sample Sparse Representation Method for Use With Face Recognition", "year": 2011, "pdf": "https://doi.org/10.1109/TCSVT.2011.2138790"}, {"id": "468322e98f87a33d926aad99295acc2919b2ca0b", "title": "Wally: crowd powered image matching on tablets", "year": 2012, "pdf": "http://users.soe.ucsc.edu/~davis/papers/2012_Wally-Pai-Davis-Final.pdf"}, {"id": "f4fc77660665ae58993065c6a336367e9a6c85f7", "title": "Biview face recognition in the shape-texture domain", "year": 2013, "pdf": "https://doi.org/10.1016/j.patcog.2012.12.009"}, {"id": "ca8dc160836a87055579ea9bc4fc379a95f3156a", "title": "Face Recognition Based on Nonlinear DCT Discriminant Feature Extraction Using Improved Kernel DCV", "year": "2009", "pdf": "https://pdfs.semanticscholar.org/ca8d/c160836a87055579ea9bc4fc379a95f3156a.pdf"}, {"id": "3b2261fd37e999741b6f8d5b956a177e15521b10", "title": "Partially Distributed Representations of Objects and Faces in Ventral Temporal Cortex", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/3fab/578a7f5edee7b8ccffb1970a5f939a9000e4.pdf"}, {"id": "4192454749b4fc4a95348e7260c2f5c56a06cafd", "title": "Image retrieval using efficient feature vectors generated from compressed domain", "year": 2007, "pdf": "http://www.eurasip.org/Proceedings/Eusipco/Eusipco2007/Papers/c3l-f01.pdf"}, {"id": "31a38fd2d9d4f34d2b54318021209fe5565b8f7f", "title": "Pose-Invariant Face Recognition Using Markov Random Fields", "year": 2013, "pdf": "http://www.umiacs.umd.edu/~huytho/papers/HoChellappa_TIP2013.pdf"}, {"id": "84c98f21017db431a902cd20fb0999ce0f872659", "title": "Recognizing distinctive faces: a hybrid-similarity exemplar model account.", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/84c9/8f21017db431a902cd20fb0999ce0f872659.pdf"}, {"id": "f5001aca394bbe7067db1b65f231c627e286aa4c", "title": "Component-based global k-NN classifier for small sample size problems", "year": 2012, "pdf": "https://doi.org/10.1016/j.patrec.2012.05.020"}, {"id": "ff7d2d9762d6ed1ca6b868bc714b72a5f150f3f1", "title": "A comparative study of preprocessing mismatch effects in color image based face recognition", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/ff7d/2d9762d6ed1ca6b868bc714b72a5f150f3f1.pdf"}, {"id": "1455591d81c4ddabfe31de9f57f53e9b91e71fa2", "title": "Discriminant Dual-Tree Complex Wavelet Features for Face Recognition", "year": 2008, "pdf": null}, {"id": "8fe9cd45280696574a6afc10e5a06eb1888d82ee", "title": "Illumination Invariant Face Recognition Using Thermal Infrared Imagery", "year": "2001", "pdf": "https://pdfs.semanticscholar.org/8fe9/cd45280696574a6afc10e5a06eb1888d82ee.pdf"}, {"id": "60cdcf75e97e88638ec973f468598ae7f75c59b4", "title": "Face Annotation Using Transductive Kernel Fisher Discriminant", "year": 2008, "pdf": "http://www.cse.cuhk.edu.hk/~lyu/paper_pdf/tmm08face.pdf"}, {"id": "608dfcdbb393f44d4ae1520f6c6fdd736cee337c", "title": "Empirical Performance Analysis of Linear Discriminant Classi", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/608d/fcdbb393f44d4ae1520f6c6fdd736cee337c.pdf"}, {"id": "38edcd91ef2ace3b3dca4c49a22d20a156f62385", "title": "Hierarchical Shape Modeling for Automatic Face Localization", "year": 2002, "pdf": 
"http://pdfs.semanticscholar.org/38ed/cd91ef2ace3b3dca4c49a22d20a156f62385.pdf"}, {"id": "830e5b1043227fe189b3f93619ef4c58868758a7", "title": "A survey on face detection in the wild: Past, present and future", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/830e/5b1043227fe189b3f93619ef4c58868758a7.pdf"}, {"id": "26779e1152bcb0072eda2abf4262c72fcfbcda2e", "title": "Color Face Recognition for Degraded Face Images", "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4804691"}, {"id": "b9b5624045c6f9d77fd1a029f4ff27aab26fa9fe", "title": "Feature Selection of Face Recognition Based on Improved Chaos Genetic Algorithm", "year": 2010, "pdf": null}, {"id": "79664cea983880bc4b9fbe4d4291b7d3f2b5dbc3", "title": "Dynamic Features for Iris Recognition", "year": 2012, "pdf": "https://doi.org/10.1109/TSMCB.2012.2186125"}, {"id": "4a0267f6e840d6632122a60e8fad1f8740eba8ca", "title": "Comparative study: face recognition on unspecific persons using linear subspace methods", "year": 2005, "pdf": "http://dahualin.org/publications/dhl05_compstudy.pdf"}, {"id": "29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d", "title": "SCface \u2013 surveillance cameras face database", "year": 2009, "pdf": "http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf"}, {"id": "6264717cccb3baf714974c37a724b652e5294b59", "title": "Local Curvelet Based Classification Using Linear Discriminant Analysis for Face Recognition", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/81ea/a34334d58f7b324d988162218332395f152f.pdf"}, {"id": "f0304ea9943e96e645b8702c2810cca517439465", "title": "Automatic Diagnosis of Diabetic Retinopathy by Hybrid Multilayer Feed Forward Neural Network", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f030/4ea9943e96e645b8702c2810cca517439465.pdf"}, {"id": "7334b3ade39e3bd826bdd9bdae73d176e4a02caf", "title": "Gender Recognition Using Complexity-Aware Local Features", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977126"}, {"id": "9becfc370f4dad9f2e57b1a0fe3a60d177c1b2ea", "title": "A new face recognition system based on color histogram matching", "year": 2008, "pdf": null}, {"id": "b8e35566129299c3591af0fd4f127e5e0d0b5774", "title": "3D Facial Image Comparison using Landmarks", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/b8e3/5566129299c3591af0fd4f127e5e0d0b5774.pdf"}, {"id": "09333e0dcb709b1f67dd86fc308b604231627278", "title": "View-invariant Estimation of Height and Stride for Gait Recognition", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/0933/3e0dcb709b1f67dd86fc308b604231627278.pdf"}, {"id": "26f4be90c444104ecd01f9727a7ac546ce63d13b", "title": "Automatic gait recognition using width vector mean", "year": 2009, "pdf": null}, {"id": "c57d84d3af11ccd02c0186b4f66d312576cea696", "title": "Detecting driver inattention by rough iconic classification", "year": 2013, "pdf": "https://doi.org/10.1109/IVS.2013.6629583"}, {"id": "055d3a41298fe36e0dab50c19d0c43eab5d1c020", "title": "Choosing Parameters of Kernel Subspace LDA for Recognition of Face Images Under Pose and Illumination Variations", "year": 2007, "pdf": "http://www.csee.wvu.edu/~richas/ML-Papers/KLDA.pdf"}, {"id": "41c56c69b20b3f0b6c8a625009fc0a4d317e047a", "title": "Integral Local Binary Patterns: A Novel Approach Suitable for Texture-Based Object Detection Tasks", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5720366"}, {"id": "e6bbe7feb5633a361ffb6ed4c674d54574eb531e", "title": "Image quality and position 
variability assessment in minutiae-based fingerprint verification - Vision, Image and Signal Processing, IEE Proceedings-", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/e6bb/e7feb5633a361ffb6ed4c674d54574eb531e.pdf"}, {"id": "aa50640de05b10b16a58d68a7e866173ffcbf698", "title": "Multimodal biometrics using geometry preserving projections", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/aa50/640de05b10b16a58d68a7e866173ffcbf698.pdf"}, {"id": "7ffa7a36e5414a0f2b16b1d8f93442ab15e2235d", "title": "The CMU Pose, Illumination, and Expression Database", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/7ffa/7a36e5414a0f2b16b1d8f93442ab15e2235d.pdf"}, {"id": "ac37d85cda078c36f69b411fa85caf66869af845", "title": "Boosting in Random Subspaces for Face Recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.337"}, {"id": "ce5ee0c014b5559c8f294c34076de7e978f84844", "title": "Efficient object detection by prediction in 3D space", "year": 2015, "pdf": "https://doi.org/10.1016/j.sigpro.2014.08.039"}, {"id": "7f9c3ee2d3a3db9922203cbd19f03708067a42ab", "title": "A Comparative Analysis of Face Recognition Algorithms", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/7f9c/3ee2d3a3db9922203cbd19f03708067a42ab.pdf"}, {"id": "9fe2fed0bbcb9e41e058031b98e7247fea854bac", "title": "Privacy preserving security using biometrics in cloud computing", "year": 2017, "pdf": null}, {"id": "e1c0beb01462d37a77c34909a02a29725c187f5e", "title": "GA-fisher: a new LDA-based face recognition algorithm with selection of principal components", "year": 2005, "pdf": "http://eecs.qmul.ac.uk/~jason/Research/PreprintVersion/GA-Fisher%20A%20New%20LDA-Based%20Face%20Recognition%20Algorithm%20With%20Selection%20of%20Principal%20Components.pdf"}, {"id": "c30f28a238925be05520c73c0977d87b58e1a6f8", "title": "Intelligent Multimedia Surveillance", "year": 2013, "pdf": null}, {"id": "5cbed05521a44b568c77879fb0b84e1fa27d12e0", "title": "Gait-Based Recognition of Humans Using Continuous HMMs", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/c7fc/22b63c544b0d1a288892a3f0fe89cf9d22d7.pdf"}, {"id": "2df51d77de0cb69943ba491fc4726f9df3358512", "title": "A new technique for Face Recognition using 2D-Gabor Wavelet Transform with 2D-Hidden Markov Model approach", "year": 2013, "pdf": null}, {"id": "4430d41c36c731c16020037d25df3dcd237fd863", "title": "IJATRD 2012019 ) THERMAL RECOGNITION IN BIOMETRICS AUTHENTICATION Mr . 
Gopal", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4430/d41c36c731c16020037d25df3dcd237fd863.pdf"}, {"id": "cf06e540e1cea9a915d8081cc75962f982a564a4", "title": "Enhancing sparsity via full rank decomposition for robust face recognition", "year": 2014, "pdf": "https://doi.org/10.1007/s00521-014-1582-4"}, {"id": "67c78fbef7ebcde1b8c4e42415e595fb78317133", "title": "Optimization of Face Recognition Algorithms for Smartphone Environment", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/67c7/8fbef7ebcde1b8c4e42415e595fb78317133.pdf"}, {"id": "40ef0fba414d41f23d9d8a3b0147cc579285d54b", "title": "Median null(Sw)-based method for face feature recognition", "year": 2013, "pdf": "https://doi.org/10.1016/j.amc.2013.01.005"}, {"id": "41235b815a3a69eb5ef48199e7ea7e98495e56a9", "title": "Learning Discriminative Local Patterns with Unrestricted Structure for Face Recognition", "year": 2013, "pdf": "http://www98.griffith.edu.au/dspace/bitstream/handle/10072/56813/91486_1.pdf?sequence=1"}, {"id": "2f0d620715e5a13e765fd36c418eeaf462c740e9", "title": "Gender classification using face recognition", "year": 2013, "pdf": null}, {"id": "f0c48ad56bd809ef4d2e166b4c1d9fd238356e00", "title": "Kernel Eigenfaces vs. Kernel Fisherfaces: Face recognition using kernel methods", "year": 2002, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2002.10001"}, {"id": "d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b", "title": "Gender recognition: Methods, datasets and results", "year": 2015, "pdf": "http://iplab.dmi.unict.it/sites/default/files/_11.pdf"}, {"id": "660c6a47ea29de2b4f40ac942ba682954118722f", "title": "Chapter 1 SUPER - RESOLUTION : LIMITS AND BEYOND", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/c938/58d7a8c5a1347b632f34b30e2fb55bd3a99a.pdf"}, {"id": "0d595eda148555402d59750cb08f79bd52a859d5", "title": "Discriminant subclass-center manifold preserving projection for face feature extraction", "year": 2011, "pdf": "https://doi.org/10.1109/ICIP.2011.6116297"}, {"id": "54f0588b379b25ee6c22952e486d7da45bad7bab", "title": "Optimal feature selection for support vector machines", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/bab6/9d0954213851bc4aae50ece0ce8ac52bdedf.pdf"}, {"id": "94b60008e5f576f46bd3c385398cf2ecbb16f499", "title": "3D Model Based Face Recognition Using Inverse Compositional Image Alignment", "year": 2010, "pdf": null}, {"id": "22906e45359af4d2b6f279875e8c411844a5ce93", "title": "An alternative Gabor filtering scheme", "year": 2010, "pdf": "https://doi.org/10.1109/ICIP.2010.5651935"}, {"id": "03e83659f0fc98dd03c354a2cc7a90d585ff9cf5", "title": "Face Recognition Using Holistic Features and Within Class Scatter-Based PCA", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/03e8/3659f0fc98dd03c354a2cc7a90d585ff9cf5.pdf"}, {"id": "dfee9e83ccf015a3835c399beda5d57d49d25d04", "title": "Road side video surveillance in traffic scenes using mapreduce framework for accident analysis", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1490/2cf0b07eae6561184b12aef07e945936ed26.pdf"}, {"id": "801b1d399af0b40cbba15f89e19f2c7008ac192e", "title": "M\u00e9todo de reconhecimento da marcha humana por meio da fus\u00e3o das caracter\u00edsticas do movimento global", "year": "2010", "pdf": "http://doi.org/10.11606/T.18.2010.tde-24062010-153212"}, {"id": "cce261b47bbeec42cf4036e89e2413e25f66ce61", "title": "Gender recognition from facial images : 2 D or 3 D ?", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/cce2/61b47bbeec42cf4036e89e2413e25f66ce61.pdf"}, {"id": 
"aa62981beb24d6bf2a69be20cdc905ddda8ed440", "title": "Efficient MAP/ML similarity matching for visual recognition", "year": "1998", "pdf": "http://doi.org/10.1109/ICPR.1998.711290"}, {"id": "98b1e59c32c1e3c93fba0346e25892ed6904f486", "title": "Face recognition method based on fuzzy 2DPCA", "year": "2014", "pdf": "http://doi.org/10.1155/2014%2F919041"}, {"id": "d3d71a110f26872c69cf25df70043f7615edcf92", "title": "Learning Compact Feature Descriptor and Adaptive Matching Framework for Face Recognition", "year": 2015, "pdf": "https://www.cise.ufl.edu/~dihong/assets/07094272.pdf"}, {"id": "43d8a8737d763f2aba1d1aabbf1d9c74c25308ec", "title": "Visual interaction including biometrics information for a socially assistive robotics platform", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/43d8/a8737d763f2aba1d1aabbf1d9c74c25308ec.pdf"}, {"id": "2faa09413162b0a7629db93fbb27eda5aeac54ca", "title": "Quantifying how lighting and focus affect face recognition performance", "year": 2010, "pdf": "https://doi.org/10.1109/CVPRW.2010.5543228"}, {"id": "e2b3aae594035e58f72125e313e92c7c4cc9d5bb", "title": "Real-time moustache detection by combining image decolorization and texture detection with applications to facial gender recognition", "year": 2014, "pdf": "https://doi.org/10.1007/s00138-014-0597-2"}, {"id": "28f976c807d6caddf6192c59733ec12527d4caba", "title": "Fast eye localization based on pixel differences", "year": 2009, "pdf": "https://doi.org/10.1109/ICIP.2009.5414147"}, {"id": "9d9d496565c68a2d0ba5026aa527215ac5f82e23", "title": "Generating Discriminating Cartoon Faces Using Interacting Snakes", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/ce2c/e1fc69a1d1ad807773c90e65d293c609c5d2.pdf"}, {"id": "585f718ce44da9e4ebe639ea94bfc9c7301438a4", "title": "Gait Recognition Based on Fast Shape Matching", "year": 2009, "pdf": null}, {"id": "5808a08c92a4eff3e8de798dd43fb860513b342b", "title": "Finding and improving the key-frames of long video sequences for face recognition", "year": 2010, "pdf": "https://doi.org/10.1109/BTAS.2010.5634491"}, {"id": "91254b9dd1b7ab852bf6a6c346469cdf04cb0045", "title": "Distributed Object Detection With Linear SVMs", "year": 2014, "pdf": "https://doi.org/10.1109/TCYB.2014.2301453"}, {"id": "8c52fb72a46d5a001cea7ecb02588038ef1b909d", "title": "A Brain-Inspired Multi-Modal Perceptual System for Social Robots: An Experimental Realization", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8400512"}, {"id": "2b507ea9720402f8b634f8b6bf407f7d73cbbf55", "title": "Cancellable Biometrics and Multispace Random Projections", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.49"}, {"id": "15c63e01ac051f01edcf76bf809ae41db0663d97", "title": "Wavelet Frame Accelerated Reduced Support Vector Machines", "year": 2008, "pdf": "http://user.hs-nb.de/~teschke/ps/40.pdf"}, {"id": "d6e7217ecf6d163b087efc0dbb8b43de700bff93", "title": "Local Two-Dimensional Canonical Correlation Analysis", "year": 2010, "pdf": "https://doi.org/10.1109/LSP.2010.2071863"}, {"id": "4113269f916117f975d5d2a0e60864735b73c64c", "title": "Regression and classification approaches to eye localization in face images", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613059"}, {"id": "cc4a90ec7c9d964f3b6dd2505ccba3da2ed29474", "title": "Regional multi-focus image fusion using clarity enhanced image segmentation and sparse representation", "year": 2013, "pdf": null}, {"id": "63a4105adbe182e67d8fd324de5c84a6df444294", "title": "Gender 
classification by LPQ features from intensity and Monogenic images", "year": 2017, "pdf": null}, {"id": "42ab71fde8210e9f36209a6fbec1642a3e476e0e", "title": "Attention driven face recognition: A combination of spatial variant fixations and glance", "year": 2011, "pdf": "http://www.researchgate.net/profile/Laiyun_Qing/publication/224238095_Attention_driven_face_recognition_A_combination_of_spatial_variant_fixations_and_glance/links/02e7e51b0765f7a4f8000000.pdf"}, {"id": "1ffedba43c470ee93a3cf9db547ec0b55f23b31f", "title": "Efficient and accurate face detection using heterogeneous feature descriptors and feature selection", "year": 2013, "pdf": "https://doi.org/10.1016/j.cviu.2012.09.003"}, {"id": "ae3fc0955509a9a8ca2686eab5b445af8e126524", "title": "Generalized 2-D Principal Component Analysis by Lp-Norm for Image Analysis", "year": 2016, "pdf": "https://doi.org/10.1109/TCYB.2015.2416274"}, {"id": "fd068e481929949e0ab792835a943043638d1b9d", "title": "Automatic Lip-Contour Extraction and Mouth-Structure Segmentation in Images", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/MCSE.2010.145"}, {"id": "fe2fd2ef5523a9407d4e12300e5b2d0964f1fae5", "title": "Facial Feature Selection Based on SVMs by Regularized Risk Minimization", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.540"}, {"id": "02e52393244e9c67291d65331d1e1199094d79ac", "title": "A Verification Method for Viewpoint Invariant Sign Language Recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.196"}, {"id": "1d679b371c9dfd833cee0925de483562d2bc7d88", "title": "Face Recognition using 3D Summation Invariant Features", "year": 2006, "pdf": "http://www.cecs.uci.edu/~papers/icme06/pdfs/0001733.pdf"}, {"id": "effd46389bc45dfdbf32f00eddeef6cf5f0f7947", "title": "Face Verification Using Kernel Principle Component Analysis", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/effd/46389bc45dfdbf32f00eddeef6cf5f0f7947.pdf"}, {"id": "58cb1414095f5eb6a8c6843326a6653403a0ee17", "title": "Face recognition using multiple facial features", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/58cb/1414095f5eb6a8c6843326a6653403a0ee17.pdf"}, {"id": "98eb723145da3b29726e272f61eaeec742eae1e4", "title": "Secure biometric-key generation with biometric helper", "year": 2008, "pdf": null}, {"id": "25866eb48b94e85fa675b1d393163d27ffd62ba6", "title": "Multiple feature subspaces analysis for single sample per person face recognition", "year": 2017, "pdf": null}, {"id": "3e7328f082648afba63b08af462559b7bb562250", "title": "Orthogonal discriminant vector for face recognition across pose", "year": "2012", "pdf": "http://doi.org/10.1016/j.patcog.2012.04.012"}, {"id": "4e6fe1ba927fe81a6f4529ea27985cc9431ed357", "title": "Voting Among Virtually Generated Versions of a Classification Problem", "year": 2012, "pdf": "https://doi.org/10.1109/TSMCB.2011.2177084"}, {"id": "dc291f96a4db1d74a9affb2c9dd4889a11fdf970", "title": "Lighting-aware face frontalization for unconstrained face recognition", "year": "2017", "pdf": "http://doi.org/10.1016/j.patcog.2017.03.024"}, {"id": "13891ab9bcd981f69a893640bb039e06ce70d910", "title": "Distance Approximating Dimension Reduction of Riemannian Manifolds", "year": 2010, "pdf": "https://doi.org/10.1109/TSMCB.2009.2025028"}, {"id": "7c2811070594a6cae2a1282670d8aabe77bb9c0d", "title": "Dedicated mechanisms for the attention system in humanoid robots", "year": 2004, "pdf": "https://doi.org/10.1109/ICHR.2004.1442113"}, {"id": 
"9648c8276d837fdf181bc43367bc28a65135ff14", "title": "Fusion of Gabor Feature Based Classifiers for Face Verification", "year": 2007, "pdf": null}, {"id": "2a7dab8eabc94b08fd466cd9c815f6e2bd649356", "title": "Pii: S0262-8856(99)00051-7", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/2a7d/ab8eabc94b08fd466cd9c815f6e2bd649356.pdf"}, {"id": "38a9d62814b14076a9a6ec9b7d285adfefe48537", "title": "Discriminant adaptive edge weights for graph embedding", "year": 2008, "pdf": "https://doi.org/10.1109/ICASSP.2008.4518029"}, {"id": "5b1d78b160560db5f581e65289ce5e2f99eb9b1f", "title": "Twitter100k: A Real-World Dataset for Weakly Supervised Cross-Media Retrieval", "year": "2018", "pdf": "https://arxiv.org/pdf/1703.06618.pdf"}, {"id": "5b5962bdb75c72848c1fb4b34c113ff6101b5a87", "title": "Finding Celebrities in Billions of Web Images", "year": 2012, "pdf": "http://research.microsoft.com/en-us/um/people/leizhang/paper/TMM2011_Xiao.pdf"}, {"id": "961a6136409d9f322044b585fd9072267feef927", "title": "An IFS based approach for face recognition", "year": 2005, "pdf": "https://doi.org/10.1109/ICIP.2005.1530211"}, {"id": "bd47c106f57e8cace5347de80273906beb8ccec1", "title": "Hierarchical mixing linear support vector machines for nonlinear classification", "year": 2016, "pdf": "https://doi.org/10.1016/j.patcog.2016.02.018"}, {"id": "249d4ea043ca1917025b00341b4339c3d76fcad5", "title": "Human and Machine Performance on Periocular Biometrics Under Near-Infrared Light and Visible Light", "year": 2012, "pdf": "http://www3.nd.edu/~kwb/HollingsworthEtAlTIFS_2011.pdf"}, {"id": "8b1f697d81de1245c283b4f8f055b9b76badfa66", "title": "Test Sample Oriented Dictionary Learning for Face Recognition", "year": 2016, "pdf": "https://doi.org/10.1142/S0218126616500171"}, {"id": "33548dec33abfc66bba40ac3f9651c0605d6b537", "title": "CMML: a New Metric Learning Approach for Cross Modal Matching", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/3354/8dec33abfc66bba40ac3f9651c0605d6b537.pdf"}, {"id": "074faec2e546f292800c0c028912ced147b25218", "title": "Chapter 6 Face Recognition in the Thermal Infrared ?", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/074f/aec2e546f292800c0c028912ced147b25218.pdf"}, {"id": "10e70a34d56258d10f468f8252a7762950830d2b", "title": "New Parallel Models for Face Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CIS.2007.221"}, {"id": "810f5606a4769fc3dd99611acf805596fb79223d", "title": "Extraction of illumination invariant facial features from a single image using nonsubsampled contourlet transform", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/810f/5606a4769fc3dd99611acf805596fb79223d.pdf"}, {"id": "037c8cd66a6d549cfa078986af7e499c60cc2693", "title": "Hallucinating Faces by Interpolation and Principal Component Analysis", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISCID.2009.81"}, {"id": "b12ac473cab26e08657a6d8f000be8cbd5dd54aa", "title": "Cross-view gait recognition using view-dependent discriminative analysis", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996272"}, {"id": "b7f7a4df251ff26aca83d66d6b479f1dc6cd1085", "title": "Handling missing weak classifiers in boosted cascade: application to multiview and occluded face detection", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/b7f7/a4df251ff26aca83d66d6b479f1dc6cd1085.pdf"}, {"id": "9e02aae0d83a84f05a4fad4b8dcb0d664b01b12d", "title": "Smart video access control system with hybrid features in complicated environment", "year": 2016, 
"pdf": null}, {"id": "fad721b7af838964c98bbb3ebb3f6265b83f950d", "title": "Adult Image Content Classification Using Global Features and Skin Region Detection", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/fad7/21b7af838964c98bbb3ebb3f6265b83f950d.pdf"}, {"id": "0d4740b19fb6148bc22b7c07a5c750debc242dd5", "title": "Recognizing Expressions in a New Database Containing Played and Natural Expressions", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699110"}, {"id": "39a71ceb241f34b30ffd248669346c059dd1ec97", "title": "Gait Recognition Based on Human Body Components", "year": 2007, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0100353.pdf"}, {"id": "fa4a215cf6ae71c2c939375db7f5318f644c976a", "title": "Faces forming traces: Neurophysiological correlates of learning naturally distinctive and caricatured faces", "year": 2012, "pdf": "https://doi.org/10.1016/j.neuroimage.2012.06.080"}, {"id": "2d3f1e0f163f7b031539ba403b5f94fa0bdf2a59", "title": "Multi-modal biometrics for mobile authentication", "year": 2014, "pdf": "https://doi.org/10.1109/BTAS.2014.6996269"}, {"id": "7aa76166f0a65a144de11e06cb0ed8f1dff2c197", "title": "ASePPI: Robust Privacy Protection Against De-Anonymization Attacks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014911"}, {"id": "f9d664c21d4693eeea04c824f301694dc5de4af1", "title": "Employing vector quantization on detected facial parts for face recognition", "year": 2016, "pdf": "https://doi.org/10.1109/GlobalSIP.2016.7906038"}, {"id": "d3f1b7cd0dfcf76e2011b54040f724c3573a6ccd", "title": "Fast Group Verification System for Intelligent Robot Service", "year": 2007, "pdf": "https://doi.org/10.1109/TCE.2007.4429277"}, {"id": "548364f5f2fa0290f5730586ffbbf5e1a99ac18f", "title": "Puzzling face verification algorithms for privacy protection", "year": 2014, "pdf": "https://doi.org/10.1109/WIFS.2014.7084305"}, {"id": "0296fc4d042ca8657a7d9dd02df7eb7c0a0017ad", "title": "Subspace Learning from Image Gradient Orientations", "year": 2012, "pdf": "http://ibug.doc.ic.ac.uk/media/uploads/documents/subspace_igo.pdf"}, {"id": "384f972c81c52fe36849600728865ea50a0c4670", "title": "Multi-Fold Gabor, PCA and ICA Filter Convolution Descriptor for Face Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/dad7/3d70b4fa77d67c5c02e3ecba21c52ab9a386.pdf"}, {"id": "816283d4313775c8a659edce08e5e10d75a338b3", "title": "Finding Structure with Randomness: Stochastic Algorithms for Constructing Approximate Matrix Decompositions", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/8162/83d4313775c8a659edce08e5e10d75a338b3.pdf"}, {"id": "69c03f69ddf77586f83bf13d473abf53a70e6793", "title": "Flexible multi-classifier architecture for face recognition systems", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/69c0/3f69ddf77586f83bf13d473abf53a70e6793.pdf"}, {"id": "08ae1f8dea9b5ce7923db6469443f43f2c290510", "title": "Progressive sparse representation-based classification using local discrete cosine transform evaluation for image recognition", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/c4f6/71448a86c97ab9de0107cab307e9c2a991cd.pdf"}, {"id": "027b940ef4de5aef8b2fd0bebc739445147baf1d", "title": "Secure remote matching with privacy: Scrambled support vector vaulted verification (S<sup>2</sup>V<sup>3</sup>)", "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2012.6163018"}, {"id": "9b5db4e1613cd86e6ade687bed5b01cc9f66811d", "title": "Face 
hallucination revisited: A joint framework", "year": 2013, "pdf": "https://doi.org/10.1109/ICIP.2013.6738203"}, {"id": "2d03f0ae5920e6ea91b47b711ed0e8ece65e19dd", "title": "Multi-biometrics using facial appearance, shape and temperature", "year": "2004", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301507"}, {"id": "695423ede04c7ccf05997c123fd8ab9b94c4a088", "title": "Framework for Performance Evaluation of Face, Text, and Vehicle Detection and Tracking in Video: Data, Metrics, and Protocol", "year": 2009, "pdf": "http://figment.csee.usf.edu/~vmanohar/TPAMI-2007-07-0424-Main-2Col.pdf"}, {"id": "b71d1aa90dcbe3638888725314c0d56640c1fef1", "title": "Iranian Face Database with age, pose and expression", "year": 2007, "pdf": null}, {"id": "46299c9db8a4570d060ee8fc1616c4a148056365", "title": "IJCSI Publicity Board 2011", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/3a4a/2cb2328e2f416be0be012e5d580975943554.pdf"}, {"id": "ba20e58fcca7537fe3cb46a7dea03b5915373ba7", "title": "Face Recognition by Extending Elastic Bunch Graph Matching with Particle Swarm Optimization", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/ba20/e58fcca7537fe3cb46a7dea03b5915373ba7.pdf"}, {"id": "2fdcf1c89b231ac4cff03fd6768583b3d4400b41", "title": "A low complexity JPEG domain face recognition approach using low frequency coefficients", "year": 2011, "pdf": "https://doi.org/10.1109/ICICS.2011.6174260"}, {"id": "d1a9f71e5563a1bb2f956b9b805cfc6aafe4a6e6", "title": "Robust Methods for Visual Tracking and Model", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/d1a9/f71e5563a1bb2f956b9b805cfc6aafe4a6e6.pdf"}, {"id": "ed7d4400d5e07efb7b7cc3624dbe5963b77fda33", "title": "A score level fusion framework for gait-based human recognition", "year": 2013, "pdf": "https://doi.org/10.1109/MMSP.2013.6659286"}, {"id": "12e0695c9ae037c65dfdfe80b0f2ef1246d4b104", "title": "2DPCANet: a deep leaning network for face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4923-3"}, {"id": "e5d4e5d68ab05a02dcab035a0d8ac8123a1bdcb0", "title": "Using the idea of the sparse representation to perform coarse-to-fine face recognition", "year": "2013", "pdf": "http://doi.org/10.1016/j.ins.2013.02.051"}, {"id": "cf6fc9efa22a50d76123d3b2cba46202e20e9a5d", "title": "Ensemble of Piecewise FDA Based on Spatial Histograms of Local (Gabor) Binary Patterns for Face Recognition", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699914"}, {"id": "a796a9e70e2aedf3ab1197583a90e6619a8aacd0", "title": "Face Class Modeling based on Local Appearance for Recognition", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/e273/af81e7e1f7f4801d51a774d412062add6f93.pdf"}, {"id": "940865fc3f7ee5b386c4188c231eb6590db874e9", "title": "Security and Surveillance System for Drivers Based on User Profile and learning systems for Face Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/9408/65fc3f7ee5b386c4188c231eb6590db874e9.pdf"}, {"id": "268afd5de8fa32cadd4a90bf0bb1c9938a245ab4", "title": "Image Compression Effects in Face Recognition Systems", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/840c/ffafc48ebc35c707bfed81bad8013df1a971.pdf"}, {"id": "4338b6137ee0d02f4bebd14e9a2430c837590209", "title": "Automatic eyeglasses removal from face images", "year": 2003, "pdf": "http://people.csail.mit.edu/~celiu/pdfs/eyeglasses-TPAMI.pdf"}, {"id": "47719d391417a237701c5e275ebb1034418e20f2", "title": "Human Face Processing with 1.5D Models", "year": 2007, "pdf": 
"http://pdfs.semanticscholar.org/4771/9d391417a237701c5e275ebb1034418e20f2.pdf"}, {"id": "c0b3159ca4bc847913e82c9a4c1f96f14e0e52db", "title": "An effective biometric discretization approach to extract highly discriminative, informative, and privacy-protective binary representation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/c0b3/159ca4bc847913e82c9a4c1f96f14e0e52db.pdf"}, {"id": "f7094d4888e9ef039d283267d310b0673ff0e423", "title": "Face recognition: a new feature selection and classification technique", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/f709/4d4888e9ef039d283267d310b0673ff0e423.pdf"}, {"id": "9055e7415beb421a861df974147394d09e442bed", "title": "Why the alternative PCA provides better performance for face recognition", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WIAMIS.2009.5031454"}, {"id": "38f56240c642677f2245aebe94fb846988487570", "title": "Mining patterns of orientations and magnitudes for face recognition", "year": 2011, "pdf": "http://www.csis.pace.edu/~ctappert/dps/2011IJCB/papers/217.pdf"}, {"id": "4fcc8e5d78c166b40e5b6ee439edc9092811c159", "title": "Recent advances in biometric person authentication", "year": 2002, "pdf": "http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/kotropoulos2002b.pdf"}, {"id": "98846b549af3828d835fe638eafe45d18fd0cb95", "title": "Adaptation to Walking Direction Changes for Gait Identification", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699156"}, {"id": "dbb86080ce125b29ddbfe114f8826050113de3f3", "title": "DST Feature Based Locality Preserving Projections for Face Recognition", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CIS.2010.69"}, {"id": "224fc841ad5b6766f828fe54f00e4f6153354627", "title": "Privacy preserving optics for miniature vision sensors", "year": 2015, "pdf": "https://doi.org/10.1109/CVPR.2015.7298628"}, {"id": "4bb80e2d99a5d80b9b90fe4ad3d76c2c14270a22", "title": "Image Based Face Replacement in Video", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/4bb8/0e2d99a5d80b9b90fe4ad3d76c2c14270a22.pdf"}, {"id": "76a52ebfc5afd547f8b73430ec81456cf25ddd69", "title": "Gender and age recognition for video analytics solution", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914"}, {"id": "0d506bef264b0a38465934810be8869479733ef6", "title": "Cost-Sensitive Face Recognition", "year": "2008", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4587815"}, {"id": "8bf4f45460536852c5f4189de009d0d1bff3ccde", "title": "Ensembles of Correlation Filters for Object Detection", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2015.129"}, {"id": "aad43bc579a9b1b4e998b8399a820c2c26001506", "title": "Cancellable biometrics and annotations on BioHash", "year": "2008", "pdf": "http://doi.org/10.1016/j.patcog.2007.12.002"}, {"id": "71bece8ec4934e3034f76d8ba19199c5b8ec52ea", "title": "Illumination Variation in Face Recognition: A Review", "year": 2009, "pdf": null}, {"id": "299a7614b9f42fc292c74fb64f14940835e36866", "title": "Gait recognition by dynamic cues", "year": 2008, "pdf": "http://www.researchgate.net/profile/Imed_Bouchrika/publication/224374880_Gait_recognition_by_dynamic_cues/links/004635258ea7e1df05000000.pdf"}, {"id": "fccd2c78a29ba3fbb12377665c65d065599a0ae2", "title": "Nearest-Subspace Patch Matching for face recognition under varying pose and illumination", "year": "2008", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813452"}, {"id": 
"5da740682f080a70a30dc46b0fc66616884463ec", "title": "Real-Time Head Pose Estimation Using Multi-variate RVM on Faces in the Wild", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5da7/40682f080a70a30dc46b0fc66616884463ec.pdf"}, {"id": "a03f51c5a56401986e451e9d50ace2bc4686e12c", "title": "Face recognition using local gradient binary count pattern", "year": 2015, "pdf": "https://doi.org/10.1117/1.JEI.24.6.063003"}, {"id": "552020318f81082b06d7ba12f0010e27d360d77b", "title": "Decision fusion for frontal face verification", "year": 2008, "pdf": null}, {"id": "aa36d50e7cb584d68a3eef0d3345954aa58f63df", "title": "Sub-Image Homomorphic Filtering Technique for Improving Facial Identification under Difficult Illumination Conditions", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/aa36/d50e7cb584d68a3eef0d3345954aa58f63df.pdf"}, {"id": "5236d6d60cdc0fbdb3caf78bccecca5d92b3d67f", "title": "Color Local Texture Features for Color Face Recognition", "year": 2012, "pdf": "https://doi.org/10.1109/TIP.2011.2168413"}, {"id": "ba80a5a7848ebe39fc96bb2a5959c4daf81d3ffc", "title": "Security using image processing and deep convolutional neural networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8376292"}, {"id": "9e10ea753b9767aa2f91dafe8545cd6f44befd7f", "title": "Learning discriminative local binary patterns for face recognition", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771444"}, {"id": "e39c1a6d7f0e0f527819a105ab1760efefd46a4e", "title": "Gait recognition using linear time normalization", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/e39c/1a6d7f0e0f527819a105ab1760efefd46a4e.pdf"}, {"id": "a5dd647ff98d8ac9642a884c501de9a7aaf9a1b7", "title": "ICANet : a simple cascade linear convolution network for face recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a5dd/647ff98d8ac9642a884c501de9a7aaf9a1b7.pdf"}, {"id": "d628aabf1a666a875e77c3d3fee857cd25891947", "title": "Eye detection in unrestrained settings using efficient match kernels and SVM classification", "year": 2016, "pdf": "https://doi.org/10.1109/SMC.2016.7844663"}, {"id": "3ac3386a9d1a5651ca2062d5c1b23766bea2568e", "title": "Automatic 3D Facial Feature Extraction Algorithm", "year": 2008, "pdf": "https://doi.org/10.1109/NTMS.2008.ECP.28"}, {"id": "40f6207b722c739c04ba5a41f7b22d472aeb08ec", "title": "PFID: Pittsburgh fast-food image dataset", "year": 2009, "pdf": "http://www.lb.cs.cmu.edu/~rahuls/pub/icip2009-rahuls.pdf"}, {"id": "a080154a6668cc9d37944a9ae4650a14b9146aa7", "title": "Enhanced maximum likelihood face recognition", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/a080/154a6668cc9d37944a9ae4650a14b9146aa7.pdf"}, {"id": "30ec7ddd215c69d4c196842f4328ef18462bab2a", "title": "A New Gait Recognition Method Based on Body Contour", "year": 2006, "pdf": "https://doi.org/10.1109/ICARCV.2006.345148"}, {"id": "3600f9def4e619e154a59df50dffe3cb23300e42", "title": "A Grey Wolf Optimizer for Modular Granular Neural Networks for Human Recognition", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/3600/f9def4e619e154a59df50dffe3cb23300e42.pdf"}, {"id": "3447fc311a3adf60d36283b51c1a1e0d3be7416c", "title": "A Generic Framework for Efficient 2-D and 3-D Facial Expression Analogy", "year": 2007, "pdf": "http://domino.mpi-inf.mpg.de/intranet/ag4/ag4publ.nsf/fff4f54111fe8e56c12567530068624e/1a5aa9e45d0e4fa8c125731a00516457/$FILE/manu.pdf"}, {"id": "574751dbb53777101502419127ba8209562c4758", "title": "Gender classification from 
unaligned facial images using support subspaces", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/5747/51dbb53777101502419127ba8209562c4758.pdf"}, {"id": "8bcee0c84759a2bf17d8f8e77b8a393c6f823ded", "title": "Optimized Performance of 2DPCA Approach in Face Recognition System", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/8bce/e0c84759a2bf17d8f8e77b8a393c6f823ded.pdf"}, {"id": "6f9c7a5d99a33c25ee384f760108e07f5923ccee", "title": "An adaptive approximation image reconstruction method for single sample problem in face recognition using FLDA", "year": 2014, "pdf": "https://doi.org/10.1007/s11042-014-2168-y"}, {"id": "8816ee1e23983f5a4340743c7744a336fae02d60", "title": "Face Recognition Using Boosted Local Features", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/8816/ee1e23983f5a4340743c7744a336fae02d60.pdf"}, {"id": "fca58cb8a1304c9180e939d82be2163f3a46dd7d", "title": "Using eye movements as an index of implicit face recognition in autism spectrum disorder.", "year": 2012, "pdf": null}, {"id": "c315a050478781da65556a745e01286ee4a8676e", "title": "Guide to Biometrics", "year": 2004, "pdf": null}, {"id": "48721c3e13d2c1373a1638b8df0b3a4ab0cb5737", "title": "Log-Euclidean Kernels for Sparse Representation and Dictionary Learning", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751309"}, {"id": "14a66e11feb81045cb9f13211a30bf95f18b608f", "title": "A Detected Sub - Sequence Facial Similarity Cue Torso Color Distance Cue", "year": "", "pdf": "http://pdfs.semanticscholar.org/14a6/6e11feb81045cb9f13211a30bf95f18b608f.pdf"}, {"id": "0422cd0e5f86b04039ee468c095a87c843a66187", "title": "Ear Recognition by means of a Rotation Invariant Descriptor", "year": 2006, "pdf": "http://www.researchgate.net/profile/Daniel_Riccio/publication/220929336_Ear_Recognition_by_means_of_a_Rotation_Invariant_Descriptor/links/53fe16560cf283c3583b1eec.pdf"}, {"id": "3cd10f6f24c49ce677a18f0984ff4466333d8d13", "title": "Correcting rolling-shutter distortion of CMOS sensors using facial feature detection", "year": 2010, "pdf": "https://www.wjscheirer.com/papers/wjs_btas2010a_rolling.pdf"}, {"id": "38f1d8d25c0332798e0929594af2c43092d2c5c8", "title": "Face recognition via fast dense correspondence", "year": 2017, "pdf": null}, {"id": "5e832ea5328cdcc9b4346458672ad8288a56c0a7", "title": "Illumination-robust face recognition with Block-based Local Contrast Patterns", "year": 2017, "pdf": "https://doi.org/10.1109/ICASSP.2017.7952390"}, {"id": "ff42ec628b0980909bbb84225d0c4f8d9ac51e03", "title": "Convergent 2-D Subspace Learning With Null Space Analysis", "year": 2008, "pdf": "https://doi.org/10.1109/TCSVT.2008.2005799"}, {"id": "4d423acc78273b75134e2afd1777ba6d3a398973", "title": "International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf"}, {"id": "7d7870b7633678db2d39d4a5d69d10337ca827d9", "title": "Introducing set of internal parameters for Laplacian faces to enhance performance under varying conditions", "year": 2009, "pdf": null}, {"id": "bd0c4afe4511816a1a0a8d11f7d6ea753e63cb9a", "title": "An automated chimpanzee identification system using face detection and recognition", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/bd0c/4afe4511816a1a0a8d11f7d6ea753e63cb9a.pdf"}, {"id": "b3f07763288c67d9f08b4a2180e06c0468856629", "title": "GREYC keystroke: A benchmark for keystroke dynamics biometric systems", 
"year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339051"}, {"id": "c28442776925f1bc941495ade5638d6d5b9c85d5", "title": "N-division output coding method applied to face recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/c284/42776925f1bc941495ade5638d6d5b9c85d5.pdf"}, {"id": "cda08b49c91c805c4820238f30a5118f30e55bfe", "title": "SFS Based View Synthesis for Robust Face Recognition", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/cda0/8b49c91c805c4820238f30a5118f30e55bfe.pdf"}, {"id": "3fd7c26c6aec1f2c522cced79959906c9ef64867", "title": "Toward Pose-Invariant 2-D Face Recognition Through Point Distribution Models and Facial Symmetry", "year": 2007, "pdf": "https://doi.org/10.1109/TIFS.2007.903543"}, {"id": "838d5022c381c593cdfd4fb5c99a019fe39d94d9", "title": "Compressing arrays of classifiers using Volterra-neural network: application to face recognition", "year": 2012, "pdf": "http://www.researchgate.net/profile/Diego_Milone/publication/257435774_Compressing_arrays_of_classifiers_using_Volterra-neural_network_application_to_face_recognition/links/0c96052555b2a0bad9000000.pdf"}, {"id": "6d071848a3fd4e6c3184db0a68a41559ec9a47c8", "title": "An approach to Enhance Automatic Diagnosis of Diabetic Retinopathy and Classification by Hybrid Multilayer Feed forward Neural Networks by Genetic Algorithm", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/6d07/1848a3fd4e6c3184db0a68a41559ec9a47c8.pdf"}, {"id": "c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d", "title": "Modeling for part-based visual object detection based on local features", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/c8db/8764f9d8f5d44e739bbcb663fbfc0a40fb3d.pdf"}, {"id": "1f2551b2acfb6895e91e39ae36a51335893a849f", "title": "3D Face Recognition Using Face Feature Points Based on Parallel Stereo Vision", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/1f25/51b2acfb6895e91e39ae36a51335893a849f.pdf"}, {"id": "0f1cbe4e26d584c82008ccef9fb1e4669b82de1f", "title": "Stability as performance metric for subjective pattern recognition - application of Electoral College in face recognition", "year": 2008, "pdf": "https://doi.org/10.1109/ICPR.2008.4761064"}, {"id": "2a8fa4e068968915f65c4f127167d485d0299b39", "title": "Parametric and Nonparametric Methods for the Statistical Evaluation of Human ID Algorithms", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/67ed/cbd4f6a5d65329c3ec1a31775f39db56894a.pdf"}, {"id": "0b302889378874925891387fd5b97f3e3de6dcea", "title": "A discriminative feature space for detecting and recognizing faces", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2004.9"}, {"id": "cb0ba5b3e5c64010a03840ac1fbae12e4959d4a5", "title": "Fast resolution aware model fitting for noisy low resoluton image", "year": 2011, "pdf": null}, {"id": "0c95bccca09f89a3036af0f9c3dac318f0f249b9", "title": "Choice of biometrics", "year": 2014, "pdf": null}, {"id": "73a682a9bd0546c7507feb00f15627e561752df4", "title": "1D-LDAvs. 
2D-LDA:When is vector-based linear discriminant analysis better thanmatrix-based?", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/73a6/82a9bd0546c7507feb00f15627e561752df4.pdf"}, {"id": "bbb440b6b8e2d40a8681c12e467698179483cea0", "title": "Fusion of face and iris features extraction based on steerable pyramid representation for multimodal biometrics", "year": 2011, "pdf": null}, {"id": "8606b6cc617968026a492da963d151f2a5def5e8", "title": "Dynamic Laplacian Principle Component Analysis on Objective Space", "year": 2008, "pdf": null}, {"id": "32b33aa8b5f5f26487b7bf39c3da16e5deec25c4", "title": "Scale Normalized Radial Fourier Transform as a Robust Image Descriptor", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.684"}, {"id": "492ad82db287f3dca28c42e10da2dd98d3397bb1", "title": "Robust face recognition using wavelet transform and autoassociative neural network", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/492a/d82db287f3dca28c42e10da2dd98d3397bb1.pdf"}, {"id": "18727adf3e63de90674fcafd8b1f5e0059669e84", "title": "A Comparative Study of Pca, Ica and Lda", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/1872/7adf3e63de90674fcafd8b1f5e0059669e84.pdf"}, {"id": "b408044db850f4446ad0d3ea0ae35986f23a4e07", "title": "An effective learning strategy for cascaded object detection", "year": "2016", "pdf": "http://doi.org/10.1016/j.ins.2016.01.021"}, {"id": "312a261d15bf3f3ae5187069aed5bd2821f881bf", "title": "Fusing hierarchical multi-scale local binary patterns and virtual mirror samples to perform face recognition", "year": 2015, "pdf": "https://doi.org/10.1007/s00521-015-1863-6"}, {"id": "aa9b924c88b75909871585cbadbed2d76df7dbdf", "title": "The effects of item familiarity on the neural correlates of successful associative memory encoding.", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/aa9b/924c88b75909871585cbadbed2d76df7dbdf.pdf"}, {"id": "5860cf0f24f2ec3f8cbc39292976eed52ba2eafd", "title": "COMPUTATION EvaBio: A TOOL FOR PERFORMANCE EVALUATION IN BIOMETRICS", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/5860/cf0f24f2ec3f8cbc39292976eed52ba2eafd.pdf"}, {"id": "8746db1ddf564856f01555380903499553c03a4a", "title": "Half-Face Dictionary Integration for Representation-Based Classification", "year": 2015, "pdf": "https://doi.org/10.1109/TCYB.2015.2508645"}, {"id": "01cfa4d1a4b59f3606c2273f5d5b2b24a0361eb0", "title": "Uncooperative gait recognition by learning to rank", "year": "2014", "pdf": "http://doi.org/10.1016/j.patcog.2014.06.010"}, {"id": "cf3250327b9c7437c3ef95b868ce19a0e35f6d82", "title": "The Design and Implementation of a Performance Evaluation Tool for the Face Recognition System", "year": "2007", "pdf": null}, {"id": "8897dd825230695a8a669b29a4d1b284373adb31", "title": "Face Recognition using Co - occurrence Matrix of Local Average Binary Pattern ( CMLABP )", "year": "", "pdf": "http://pdfs.semanticscholar.org/8897/dd825230695a8a669b29a4d1b284373adb31.pdf"}, {"id": "290136947fd44879d914085ee51d8a4f433765fa", "title": "On a taxonomy of facial features", "year": 2010, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/KlareJain_TaxonomyFacialFeatures_BTAS10.pdf"}, {"id": "7c1e8013bb56add82a0b3ec8df38cdf09ca9d002", "title": "The FN400 indexes familiarity-based recognition of faces.", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7c1e/8013bb56add82a0b3ec8df38cdf09ca9d002.pdf"}, {"id": "889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7", "title": "Using Support Vector Machines to Enhance the Performance of Bayesian Face Recognition", 
"year": 2007, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_face.pdf"}, {"id": "3b9eaf8d913f99adeb9192f68808efb7d2c0fac5", "title": "A Statistical Multiresolution Approach for Face Recognition Using Structural Hidden Markov Models", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/3b9e/af8d913f99adeb9192f68808efb7d2c0fac5.pdf"}, {"id": "72962038460e32b0dd01d083f7f4049be36a34b9", "title": "Face Recognition Under Occlusions and Variant Expressions With Partial Similarity", "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4895689"}, {"id": "19fbf78b32d40805063ad52117be9df69cf10678", "title": "Hierarchical Ensemble of Global and Local Classifiers for Face Recognition", "year": 2007, "pdf": "http://www.researchgate.net/profile/Wen_Gao2/publication/220501605_Hierarchical_Ensemble_of_Global_and_Local_Classifiers_for_Face_Recognition/links/54bd11a10cf218d4a1697cba.pdf"}, {"id": "aa94f214bb3e14842e4056fdef834a51aecef39c", "title": "Reconhecimento de padro\u0303es faciais: Um estudo", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/aa94/f214bb3e14842e4056fdef834a51aecef39c.pdf"}, {"id": "19d47166dbd4748a889d1a2fcb2f899b76c83252", "title": "Uncorrelated Discriminant Vectors vs. Orthogonal Discriminant Vectors in Appearance-Based Face Recognition", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISCID.2009.257"}, {"id": "bab69d0954213851bc4aae50ece0ce8ac52bdedf", "title": "' s personal copy Optimal feature selection for support vector machines", "year": "2009", "pdf": "https://pdfs.semanticscholar.org/bab6/9d0954213851bc4aae50ece0ce8ac52bdedf.pdf"}, {"id": "056d1637fac0510146431a03d81de1cbf1147d65", "title": "UHDB11 Database for 3D-2D Face Recognition", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/056d/1637fac0510146431a03d81de1cbf1147d65.pdf"}, {"id": "1ee9598f88f40dabb70965a74eed87aedb276171", "title": "Face recognition using Histogram of co-occurrence Gabor phase patterns", "year": 2013, "pdf": "https://doi.org/10.1109/ICIP.2013.6738572"}, {"id": "d6cf3cab269877c58a16be011b74e07838d957c2", "title": "Face verification and identification using Facial Trait Code", "year": 2009, "pdf": "http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0162.pdf"}, {"id": "2f95509b8e2e7a7ab10e7bace8193e087102f625", "title": "A two step method to recover occluded part of face", "year": 2010, "pdf": null}, {"id": "eb8a21f3eb2cae9d3da79dfa50a3372c4c426665", "title": "Fisher Discriminant Analysis With L1-Norm", "year": 2014, "pdf": "https://doi.org/10.1109/TCYB.2013.2273355"}, {"id": "ee6474bde6fa652ea14ba1a0f29ee551c3a1d5f5", "title": "Kernel inverse Fisher discriminant analysis for face recognition", "year": 2014, "pdf": "https://doi.org/10.1016/j.neucom.2012.12.075"}, {"id": "a91db6f86cc8acfcfaa535867b05383b6476c739", "title": "Local Feature Matching For Face Recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CRV.2006.48"}, {"id": "305cfba1e9be3f0bef1f0474a6fa86f76462087e", "title": "CLRMA: Compact Low Rank Matrix Approximation for Data Compression", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/305c/fba1e9be3f0bef1f0474a6fa86f76462087e.pdf"}, {"id": "354ddc8976a762ee03fb78b73adc3b5312e5f2a5", "title": "Accurate Eye Center Location through Invariant Isocentric Patterns", "year": 2012, "pdf": "https://staff.fnwi.uva.nl/th.gevers/pub/GeversPAMI12.pdf"}, {"id": "e73c0482c4f2e30a65affc189f6f3df6f2751a20", "title": "Combining local face image features for identity verification", "year": 2011, "pdf": 
"http://pdfs.semanticscholar.org/e73c/0482c4f2e30a65affc189f6f3df6f2751a20.pdf"}, {"id": "1c628956dd149ce177aa420cdf89c51e37ab8517", "title": "Gabor Wavelets and AdaBoost in Feature Selection for Face Verification", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/1c62/8956dd149ce177aa420cdf89c51e37ab8517.pdf"}, {"id": "8623945e67548becb658ac2866c2fd28ad0aebac", "title": "Studying Human Face Recognition with the Gaze-Contingent Window Technique", "year": "2004", "pdf": "https://pdfs.semanticscholar.org/5f01/9502b47f0780c7e626acb9210d1dcb4472c0.pdf"}, {"id": "0f96d5e621ba6a843a28e303d8bf4a669d8f2052", "title": "Analysis of the Effect of Image Resolution on Automatic Face Gender Classification", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.56"}, {"id": "96fa05f55b048b33818dc89f185a479b9f0d4a0a", "title": "A novel approach for face recognition based on stereo image processing algorithm", "year": 2008, "pdf": null}, {"id": "d54f508c943b8415bfdd30d9210869ec93ff3f03", "title": "A method of illumination compensation for human face image based on quotient image", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/d54f/508c943b8415bfdd30d9210869ec93ff3f03.pdf"}, {"id": "cadb9a014a4c5bbec57aaf30391f472fa4b69b4d", "title": "PCA versus LDA - Pattern Analysis and Machine Intelligence, IEEE Transactions on", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/cadb/9a014a4c5bbec57aaf30391f472fa4b69b4d.pdf"}, {"id": "1db44d94f6a4eaa3780c251446fa0fba14dfae44", "title": "Rapid prefrontal cortex activation towards aversively paired faces and enhanced contingency detection are observed in highly trait-anxious women under challenging conditions", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1db4/4d94f6a4eaa3780c251446fa0fba14dfae44.pdf"}, {"id": "536d1f74c6543afcf2bc711befd82ac7886d1c33", "title": "Fusing Shearlets and LBP Feature Sets for Face Recognition", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/536d/1f74c6543afcf2bc711befd82ac7886d1c33.pdf"}, {"id": "7a0ae1abc9b999ce7dfdd7879f5c29b1992f254b", "title": "Fusion of Silhouette Based Gait Features for Gait Recognition", "year": "", "pdf": "http://pdfs.semanticscholar.org/7a0a/e1abc9b999ce7dfdd7879f5c29b1992f254b.pdf"}, {"id": "b3b95496fb6815b3fff344225aae9a35611913c9", "title": "Image description with 1D local patterns by multi-scans: An application to face recognition", "year": 2010, "pdf": "https://doi.org/10.1109/ICIP.2010.5650586"}, {"id": "1832204e7a0a17390e1335cba7be9b922ee4fa57", "title": "Bilateral random projections", "year": 2012, "pdf": "http://arxiv.org/abs/1112.5215"}, {"id": "0a5d0fece90774c6be8416bc58e125087e82fb82", "title": "Precise Eye Localization with AdaBoost and Fast Radial Symmetry", "year": 2006, "pdf": null}, {"id": "5c2ad03a1f8f08a44c597870a2d3f9e518833668", "title": "Face verification with changeable templates", "year": 2009, "pdf": "https://doi.org/10.1109/CCECE.2009.5090086"}, {"id": "9ff1a4754391a5cf91c998eeaf75b93a4f5f2451", "title": "Face-based multiple instance analysis for smart electronics billboard", "year": 2011, "pdf": "https://doi.org/10.1007/s11042-011-0746-9"}, {"id": "8ce12860390fc501e89578b6f65c409209ade041", "title": "Stacked Progressive Auto-Encoders (SPAE) for Face Recognition Across Poses", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909639"}, {"id": "20823c6b9798094048bf4d59b26f5b92723c9b71", "title": "Color Face Recognition Using Quaternion PCA", "year": 2012, "pdf": 
"http://pdfs.semanticscholar.org/2082/3c6b9798094048bf4d59b26f5b92723c9b71.pdf"}, {"id": "2331df8ca9f29320dd3a33ce68a539953fa87ff5", "title": "Extended Isomap for Pattern Classification", "year": 2002, "pdf": "http://faculty.ucmerced.edu/mhyang/papers/aaai02.pdf"}, {"id": "0316ab93582f0f32b6137a0582d94aa31bfe3d1d", "title": "Face recognition method based on fuzzy 2DPCA", "year": 2014, "pdf": null}, {"id": "8b82af24bf580cbd22467ab69c588da87956a7f3", "title": "Enhanced Pictorial Structures for precise eye localization under incontrolled conditions", "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5206818"}, {"id": "f38409e401e3aae3852419a2fe7c4c7fb4ecc1b6", "title": "Hand-Drawn Face Sketch Recognition by Humans and a PCA-Based Algorithm for Forensic Applications", "year": 2010, "pdf": "https://doi.org/10.1109/TSMCA.2010.2041654"}, {"id": "a4cdb309c62182a927b0baa2f8caf6f06aec1afe", "title": "Can You See Me Now? Sensor Positioning for Automated and Persistent Surveillance", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5170013"}, {"id": "2dde7c6740577b8326db4e606c0205f60fd19ee4", "title": "Efficient statistical face recognition across pose using Local Binary Patterns and Gabor wavelets", "year": 2009, "pdf": null}, {"id": "044600cc4b93bb0504e8d72a5476d16f1a61a107", "title": "Discriminant Analysis of Principal Components for Face Recognition", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/0446/00cc4b93bb0504e8d72a5476d16f1a61a107.pdf"}, {"id": "f337bbee95772584e5e082bbffa67ac184d79155", "title": "Automatic extraction of face contours in images and videos", "year": 2012, "pdf": "https://doi.org/10.1016/j.future.2010.11.008"}, {"id": "190d8bd39c50b37b27b17ac1213e6dde105b21b8", "title": "Mining Weakly Labeled Web Facial Images for Search-Based Face Annotation", "year": 2011, "pdf": "https://dr.ntu.edu.sg/bitstream/handle/10220/18955/fp518-wang.pdf?isAllowed=y&sequence=1"}, {"id": "c85826857ce3d08c5b3aca40ccab4ad57f2aa794", "title": "Improving performance of PNN using clustered ICs for gender classification", "year": 2012, "pdf": null}, {"id": "16d84109fbfad8fc25dad8a52f95aab56fd49f18", "title": "Computation of a face attractiveness index based on neoclassical canons, symmetry, and golden ratios", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/7e89/4832755b3c4ac641ac48e139415fa831f358.pdf"}, {"id": "958b737459ad41d828a720896030aa0ffc01f43e", "title": "Locating head and face boundaries for head-shoulder images", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/958b/737459ad41d828a720896030aa0ffc01f43e.pdf"}, {"id": "b29d70f38bd4759cd9d8c2fdc9312f7f807f4fa9", "title": "Face Recognition in Subspaces", "year": "2011", "pdf": "https://pdfs.semanticscholar.org/b0f3/532a3c071d706399e52b474b2335ec8e7a19.pdf"}, {"id": "fdebde7926e87dbfb6e73dd4f8324ad2ec45d7a6", "title": "Image Segmentation for Biometric Identification Systems", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/fdeb/de7926e87dbfb6e73dd4f8324ad2ec45d7a6.pdf"}, {"id": "c2295cbb15d913fdcf6fb43f686296835fdd3333", "title": "Block based curvelet feature extraction for face recognition", "year": 2009, "pdf": null}, {"id": "20a88cc454a03d62c3368aa1f5bdffa73523827b", "title": "Face recognition using a kernel fractional-step discriminant analysis algorithm", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/d620/7593c39255ac8ce7536e5958a99f52d6bb60.pdf"}, {"id": "b219a0ecf987f1500042b7cb4da174f361eaadf3", "title": "Face Recognition of Different Modalities 
Using SIFT and LBP Features", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b219/a0ecf987f1500042b7cb4da174f361eaadf3.pdf"}, {"id": "99cf068a2c453785ebd1b109ccfa158bd3b62803", "title": "Racial stereotypes and interracial attraction: phenotypic prototypicality and perceived attractiveness of Asians.", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/99cf/068a2c453785ebd1b109ccfa158bd3b62803.pdf"}, {"id": "92911c43f91cc93a3357b48bc685ed676ef9ec11", "title": "Local Patterns of Gradients for Face Recognition", "year": 2015, "pdf": "https://doi.org/10.1109/TIFS.2015.2426144"}, {"id": "c1545782d49ae3cfd680b91caeb4b30042938416", "title": "Sparse two-dimensional singular value decomposition", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552922"}, {"id": "2e1a1deb7dccff41fca7447364d6748bf362fb70", "title": "A topographical nonnegative matrix factorization algorithm", "year": 2013, "pdf": "https://doi.org/10.1109/IJCNN.2013.6706849"}, {"id": "87ba4cce558c2defde90f4b42853262fd572ca3e", "title": "Silhouette estimation.", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/87ba/4cce558c2defde90f4b42853262fd572ca3e.pdf"}, {"id": "fbd12d85742c08647f5646bf6dfd239732f471e5", "title": "Learning the kernel matrix by maximizing a KFD-based class separability criterion", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/fbd1/2d85742c08647f5646bf6dfd239732f471e5.pdf"}, {"id": "288c03d30821d5b12754c8f21bcd76a76dd4a6fb", "title": "Feature-based Face Detection Against Skin-color Like Backgrounds with Varying Illumination", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/288c/03d30821d5b12754c8f21bcd76a76dd4a6fb.pdf"}, {"id": "e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf", "title": "Robust Kernel Representation With Statistical Local Features for Face Recognition", "year": 2013, "pdf": "https://doi.org/10.1109/TNNLS.2013.2245340"}, {"id": "e1f5c25461f3cbe1c29a29265e740adddaef7693", "title": "Retrieval Based on Indexing for Compressed Domain", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CSSE.2008.202"}, {"id": "0b5bd3ce90bf732801642b9f55a781e7de7fdde0", "title": "Face recognition using Histograms of Oriented Gradients", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/0b5b/d3ce90bf732801642b9f55a781e7de7fdde0.pdf"}, {"id": "c33c8bb663f651918ff8c4ce04d941e1d2e9be0e", "title": "Cost-Sensitive Subspace Analysis and Extensions for Face Recognition", "year": 2013, "pdf": "https://doi.org/10.1109/TIFS.2013.2243146"}, {"id": "426840ccf74bbd8b087cf357efdb80ecc85ea2ab", "title": "Reduced Analytic Dependency Modeling: Robust Fusion for Visual Recognition", "year": 2014, "pdf": "https://doi.org/10.1007/s11263-014-0723-7"}, {"id": "38bdcf2f1abf8e0e1d5bcfcee83324d4d21ba93d", "title": "Face hallucination via position-based dictionaries coding in kernel feature space", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043850"}, {"id": "030c82b87e3cdc5ba35c443a93ff4a9d21c2bc2f", "title": "Appearance Characterization of Linear Lambertian Objects, Generalized Photometric Stereo, and Illumination-Invariant Face Recognition", "year": 2007, "pdf": "http://www.cfar.umd.edu/~shaohua/papers/zhou07tpami_gps.pdf"}, {"id": "2542a1e7b005d5fe6b60aed0c4927e8b87d4b14e", "title": "Multiscale Feature Fusion for Face Identification", "year": 2017, "pdf": "https://doi.org/10.1109/CYBConf.2017.7985795"}, {"id": "992e4119d885f866cb715f4fbf0250449ce0db05", "title": "Glasses detection on real images based on robust alignment", "year": 2015, "pdf": 
"https://doi.org/10.1007/s00138-015-0674-1"}, {"id": "ea2c73f96e1d1610a562b485d780a885a883f098", "title": "Face recognition using Local Quaternion Patters and Weighted Spatially constrained Earth Mover's Distance", "year": 2009, "pdf": null}, {"id": "67d7022462c98e6c5de9f2254b46f0b8d3b92089", "title": "Facial image database mining and classification analysis using different distance metrics", "year": 2017, "pdf": null}, {"id": "eedd405b9c44da778ed3246ec3df2d5b26ca0f7f", "title": "Multiple features facial image retrieval by spectral regression and fuzzy aggregation approach", "year": 2011, "pdf": "https://doi.org/10.1108/17563781111186734"}, {"id": "562853ff22c51c7fdd10d613627880006972f9de", "title": "Optimal Locality Preserving Projection", "year": 2010, "pdf": "https://doi.org/10.1109/ICIP.2010.5653271"}, {"id": "f4373f5631329f77d85182ec2df6730cbd4686a9", "title": "Recognizing Gender from Human Facial Regions using Genetic Algorithm", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f437/3f5631329f77d85182ec2df6730cbd4686a9.pdf"}, {"id": "529e2ce6fb362bfce02d6d9a9e5de635bde81191", "title": "Normalization of Face Illumination Based on Large-and Small-Scale Features", "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2010.2097270"}, {"id": "da2d7ca77376b90a79287a517f596af628c8f488", "title": "The development trend of evaluating face-recognition technology", "year": 2014, "pdf": null}, {"id": "147fe6bfc76f30ccacc3620662511e452bc395f6", "title": "A Survey of Face Recognition Techniques", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/147f/e6bfc76f30ccacc3620662511e452bc395f6.pdf"}, {"id": "62e0edcf5a2bab8163851ca1d6ce50d42c367660", "title": "Generalized two-dimensional linear discriminant analysis with regularization", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.07426.pdf"}, {"id": "b371f417325e2797898ca2cc6a694ab0299b9c10", "title": "Pre-Capture Privacy for Small Vision Sensors", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7778202"}, {"id": "09c34a4829eb5d37f763e06e5f044396d518dc2d", "title": "Precise eye localization using HOG descriptors", "year": "2010", "pdf": "http://doi.org/10.1007/s00138-010-0273-0"}, {"id": "85d9cc478a6ef976ae83c8817d7d35e94b4dcf9d", "title": "Big Data Processing With Application to Image Super-Resolution", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/85d9/cc478a6ef976ae83c8817d7d35e94b4dcf9d.pdf"}, {"id": "8e36cc33db5aa581cd826e6ba5f830d40d674712", "title": "Using Biologically Inspired Features for Face Processing", "year": 2007, "pdf": "http://www.cs.tau.ac.il/~wolf/papers/meyerswolf2007.pdf"}, {"id": "c6785dfc407c3b63eda72956c143a5f5caaa4fb5", "title": "Developing preferential attention to a speaker: A robot learning to recognise its carer", "year": 2009, "pdf": "https://doi.org/10.1109/ALIFE.2009.4937697"}, {"id": "0c68a5c871b92639631ae65eea8da430054bb729", "title": "Facial deblur inference to improve recognition of blurred faces", "year": 2009, "pdf": "http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0552.pdf"}, {"id": "0c0b33baf60c787b3361a2671ae9aa077545b845", "title": "A meta-analysis of face recognition covariates", "year": 2009, "pdf": "http://www.cs.colostate.edu/~ross/research/papers/yr2009/btas09meta.pdf"}, {"id": "0c926122bdfd45bbaeec8c4d8a1aa85d4d033876", "title": "Combined Fisherfaces framework", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/0c92/6122bdfd45bbaeec8c4d8a1aa85d4d033876.pdf"}, {"id": "2227fb88da0ec66b5dc75a6c026022920e1fe605", "title": "Shallow and deep 
learning for image classification", "year": 2017, "pdf": null}, {"id": "dc9a6a821689de877bd07e970e52d4cdb1dd2714", "title": "Transfer of gender aftereffects in face silhouettes reveals face-specific mechanisms", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/dc9a/6a821689de877bd07e970e52d4cdb1dd2714.pdf"}, {"id": "90ae02da16b750a9fd43f8a38440f848309c2fe0", "title": "A review of facial gender recognition", "year": 2015, "pdf": "https://doi.org/10.1007/s10044-015-0499-6"}, {"id": "a72f4c3ec0699675e3050900a98434402b221b16", "title": "Latent face model for across-media face recognition", "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2016.08.036"}, {"id": "580ee9b22c8f97642a3546ae3ded6d324d4b9098", "title": "Face Recognition with Convolutional Neural Networks and subspace learning", "year": 2017, "pdf": null}, {"id": "7bd37e6721d198c555bf41a2d633c4f0a5aeecc1", "title": "Fusing Local Patterns of Gabor and Non-subsampled Contourlet Transform for Face Recognition", "year": 2013, "pdf": "https://doi.org/10.1109/ACPR.2013.58"}, {"id": "317d39381baba14edabc74de70890f946c7f8468", "title": "Feature subspace determination in video-based mismatched face recognition", "year": 2008, "pdf": "https://doi.org/10.1109/AFGR.2008.4813417"}, {"id": "305e9283f2f2ec0e1107469ec35964fad0c7bf06", "title": "Video-based face model fitting using Adaptive Active Appearance Model", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/305e/9283f2f2ec0e1107469ec35964fad0c7bf06.pdf"}, {"id": "f4717cc1de51c8b3a7c5b68fd98ae16672bc6181", "title": "Learning of face components in coherent and disturbed constellations", "year": 2010, "pdf": null}, {"id": "c90fccd1a1e928fcdc6940a7a616755b9b26ad36", "title": "Combination of two novel LDA-based methods for face recognition", "year": 2007, "pdf": "https://doi.org/10.1016/j.neucom.2006.10.008"}, {"id": "3acdccd33e518f22dcfe36ee29c332a644afdb25", "title": "Automatic Detection of Facial Midline And Its Contributions To Facial Feature Extraction", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/3acd/ccd33e518f22dcfe36ee29c332a644afdb25.pdf"}, {"id": "51082f012c050dd8fa872962f93c5407a94f6daa", "title": "Ensembles of large margin nearest neighbour with grouped lateral patch arrangement for face classification", "year": 2016, "pdf": null}, {"id": "98a660c15c821ea6d49a61c5061cd88e26c18c65", "title": "Face Databases for 2D and 3D Facial Recognition: A Survey", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/98a6/60c15c821ea6d49a61c5061cd88e26c18c65.pdf"}, {"id": "35457de70ea13415b8abd3898a4a83021946501f", "title": "Learning Robust and Discriminative Subspace With Low-Rank Constraints", "year": 2016, "pdf": "https://doi.org/10.1109/TNNLS.2015.2464090"}, {"id": "930b3472592ced6665cd630be7ae57d4abb8b4b1", "title": "Development of two novel face-recognition CAPTCHAs: A security and usability study", "year": "2016", "pdf": "http://doi.org/10.1016/j.cose.2016.03.007"}, {"id": "8ec6f729e2c7c81333f3f7b13377cc7e93a61394", "title": "Face Recognition Based on SVM and 2DPCA", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/8ec6/f729e2c7c81333f3f7b13377cc7e93a61394.pdf"}, {"id": "a9af0dc1e7a724464d4b9d174c9cf2441e34d487", "title": "Gabor-scale binary pattern for face recognition", "year": 2016, "pdf": "https://doi.org/10.1142/S0219691316500351"}, {"id": "3264df8917ad5e39e9a7f33bbfbce25ac7473a9b", "title": "Robust and Efficient Approach Based Face Recognition System Using Log Likely", "year": 2013, "pdf": 
"http://pdfs.semanticscholar.org/3264/df8917ad5e39e9a7f33bbfbce25ac7473a9b.pdf"}, {"id": "7e1d764e986855dab2d88c5ef69d100af5645f43", "title": "Maximizing intra-individual correlations for face recognition across pose differences", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206659"}, {"id": "27e709efabeb2548b789f3c9f45a426997c6147c", "title": "The neural correlates of memory encoding and recognition for own-race and other-race faces", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/27e7/09efabeb2548b789f3c9f45a426997c6147c.pdf"}, {"id": "d7d09323bf8226f9cc06402dc3026fd4f1e75859", "title": "BDPCA plus LDA: a novel fast feature extraction technique for face recognition", "year": 2006, "pdf": "http://ira.lib.polyu.edu.hk/bitstream/10397/240/1/SMCB_C_36_4_06.pdf"}, {"id": "763d9eef06b454d722c88ffab8dfb9538a57c06b", "title": "Audio-video biometric recognition for non-collaborative access granting", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/763d/9eef06b454d722c88ffab8dfb9538a57c06b.pdf"}, {"id": "80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923", "title": "Soft Biometrics for a Socially Assistive Robotic Platform", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/4569/f8e017af1e052b075d8a267116a8b795bd84.pdf"}, {"id": "30dc95969c36187ae96e414f2f45e3729213a5b8", "title": "Newton optimization based Congealing for facial image alignment", "year": 2011, "pdf": "https://doi.org/10.1109/ICIP.2011.6116614"}, {"id": "e089462ee3d91074402f25eb45bcf961da9ee2cb", "title": "Single sample face recognition via lower-upper decomposition", "year": 2015, "pdf": "https://doi.org/10.1109/ASCC.2015.7244805"}, {"id": "62da5876fbc5b6abe467891fc71b68173e6ad061", "title": "Heterogeneous Face Recognition Using Kernel Prototype Similarities", "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.229"}, {"id": "4aff9eebbf4680ac8cdc2edcf650e4c467ce633c", "title": "Synthesis and Recognition of Face Profiles", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/4aff/9eebbf4680ac8cdc2edcf650e4c467ce633c.pdf"}, {"id": "da7f6d85e7843bd57d43f1e97ae93384420cd78d", "title": "Face Recognition by Regularized Discriminant Analysis", "year": 2007, "pdf": "https://pdfs.semanticscholar.org/da7f/6d85e7843bd57d43f1e97ae93384420cd78d.pdf"}, {"id": "0cec42a1593a02ce3f4a44d375e3b95f5797aa21", "title": "Recognizing Scene Categories of Historical Postcards", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0cec/42a1593a02ce3f4a44d375e3b95f5797aa21.pdf"}, {"id": "474b6593d37c9c6547e2f0fcbfa8a9866b5cccd6", "title": "The Iteration-Tuned Dictionary for sparse representations", "year": 2010, "pdf": "http://www.irisa.fr/temics/publis/2010/bitd_mmsp_embbeded.pdf"}, {"id": "0021e292c9d8fd19f5edd1cde5bc99c112f1992d", "title": "Fast multi-scale local phase quantization histogram for face recognition", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/0021/e292c9d8fd19f5edd1cde5bc99c112f1992d.pdf"}, {"id": "8d6d50879a7633eca05c8d74a0e586f1f2892e83", "title": "TCU: Thread compaction unit for GPGPU applications on mobile graphics hardware", "year": 2012, "pdf": "https://doi.org/10.1109/MMSP.2012.6343431"}, {"id": "b13c28b35571627162cf46765821c739a7dc2d62", "title": "The Visual Object Tracking VOT2013 Challenge Results", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6755885"}, {"id": "4afced9cea17d379f0a84156f2b726de7b3f1007", "title": "A Window of Opportunity for Cognitive Training in Adolescence", "year": 2016, "pdf": 
"http://pdfs.semanticscholar.org/4afc/ed9cea17d379f0a84156f2b726de7b3f1007.pdf"}, {"id": "b9cedd1960d5c025be55ade0a0aa81b75a6efa61", "title": "Inexact Krylov Subspace Algorithms for Large Matrix Exponential Eigenproblem from Dimensionality Reduction", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/b9ce/dd1960d5c025be55ade0a0aa81b75a6efa61.pdf"}, {"id": "3f4137c23193bd853d8319620f85fd11b591dbf4", "title": "Remarks on BioHashing based cancelable biometrics in verification system", "year": "2006", "pdf": "http://doi.org/10.1016/j.neucom.2006.01.024"}, {"id": "b1290dff343ae4980e3e853055ad9a5b9116238b", "title": "Gender Classification Based on Fusion of Different Spatial Scale Features Selected by Mutual Information From Histogram of LBP, Intensity, and Shape", "year": 2013, "pdf": "http://www.cec.uchile.cl/~clperez/papers/Gender%20Classification%20Based%20on%20Fusion%20of%20Different%20Spatial%20Scale%20Features%20Selected%20by%20Mutual%20Information%20From%20Histogram%20of%20LBP,%20Intensity,%20and%20Shape%20.pdf"}, {"id": "38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4", "title": "Hierarchical Skin-AdaBoost-Neural Network (H-SKANN) for multi-face detection", "year": "2018", "pdf": "http://doi.org/10.1016/j.asoc.2018.03.030"}, {"id": "fe5b731604bf7da5e64fcea99b065c6c54474504", "title": "Solving the face growth problem in the biometrie face recognition using Photo-Anthropometric ratios by iris normalization", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401553"}, {"id": "6ef6e6e8b295c90ab9390f07d91c9ef8304a409d", "title": "Preserving privacy by de-identifying face images", "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TKDE.2005.32"}, {"id": "a4af26ad72298409e987128b9b130ca0e428baf3", "title": "A Nuclear Norm Based Matrix Regression Based Projections Method for Feature Extraction", "year": 2018, "pdf": "https://doi.org/10.1109/ACCESS.2017.2784800"}, {"id": "a3b183d041f8f3e90a2cf904eaab544070216367", "title": "Gabor Ordinal Measures for Face Recognition", "year": 2014, "pdf": "https://pdfs.semanticscholar.org/a3b1/83d041f8f3e90a2cf904eaab544070216367.pdf"}, {"id": "9d1ac83b18dd3cb4714d8b08cf27e7417bb2ccf8", "title": "Being bored? 
Recognising natural interest by extensive audiovisual integration for real-life application", "year": 2009, "pdf": "https://doi.org/10.1016/j.imavis.2009.02.013"}, {"id": "275b3cb7c780c663eabbf4d6c6cbc8fe24287c70", "title": "The Impact of Bio-Inspired Approaches Toward the Advancement of Face Recognition", "year": 2015, "pdf": "http://doi.acm.org/10.1145/2791121"}, {"id": "05f4d907ee2102d4c63a3dc337db7244c570d067", "title": "Face recognition from a single image per person: A survey", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/3c52/2c9707eb795e0dba69202f1ec946a9072661.pdf"}, {"id": "e4f3e0951fd66abe3b5dfbca88f676df82846214", "title": "Automatic age classification with LBP", "year": 2008, "pdf": null}, {"id": "ababbd7c7a3a66f6b180ba0e3c3fdaa8c79c08f3", "title": "Hierarchical and multi-featured fusion for effective gait recognition under variable scenarios", "year": "2015", "pdf": "http://doi.org/10.1007/s10044-015-0471-5"}, {"id": "c85adcc3cc2f3ab27def7e1c615b52ac182dde80", "title": "Improving face gender classification by adding deliberately misaligned faces to the training data", "year": 2008, "pdf": "http://researchcommons.waikato.ac.nz/bitstream/handle/10289/2172/facegenderclass.pdf?sequence=1"}, {"id": "c463b431a503f4eab370f1938e8104c2eaac6e32", "title": "A novel face recognition method based on the local color vector binary patterns of features localization", "year": 2014, "pdf": "https://doi.org/10.1109/ICNC.2014.6975954"}, {"id": "4e93a8a47473bf57e24aec048cb870ab366a43d6", "title": "Face authentication for multiple subjects using eigen#ow", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/4e93/a8a47473bf57e24aec048cb870ab366a43d6.pdf"}, {"id": "6c091c3bd625b3c838831d797c66eae6c8f280cc", "title": "Apperance-Based Tracking and Face Identification in Video Sequences", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/6c09/1c3bd625b3c838831d797c66eae6c8f280cc.pdf"}, {"id": "05f1c38ae248e626be99354db4b7fb7f7289bd1e", "title": "Multi-scale Local Binary Pattern histogram for gender classification", "year": 2015, "pdf": null}, {"id": "f3dc0852ba430519ffa7e383a5c5e5bf1a47b19d", "title": "Feature Extraction and Representation for Face Recognition", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f3dc/0852ba430519ffa7e383a5c5e5bf1a47b19d.pdf"}, {"id": "36688a79cc8926f489ccb6e6dadba15afbb4b6a4", "title": "Linear discriminant analysis for the small sample size problem: an overview", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/3668/8a79cc8926f489ccb6e6dadba15afbb4b6a4.pdf"}, {"id": "f6567671cc9d204c1dd1322e9c49d2053ed734c5", "title": "A Review of Vision Based Hand Gestures Recognition", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/f656/7671cc9d204c1dd1322e9c49d2053ed734c5.pdf"}, {"id": "f6dabb4d91bf7389f3af219d486d4e67cec18c17", "title": "Vector projection for face recognition", "year": 2014, "pdf": "https://doi.org/10.1016/j.compeleceng.2014.08.010"}, {"id": "781d3550f54f3b4bfbd99ca9957aba6d6dec990e", "title": "Regularized Kernel Discriminant Analysis With a Robust Kernel for Face Recognition and Verification", "year": 2012, "pdf": "http://ibug.doc.ic.ac.uk/media/uploads/documents/reg_klda.pdf"}, {"id": "a729d0243b1e3b055f44248a32b3caf20b7e93be", "title": "XJU1: A Chinese Ethnic Minorities Face Database", "year": 2017, "pdf": null}, {"id": "a24c2200cabe8f034ed94dc45b9c389eb9564f07", "title": "Image-to-image face recognition using Dual Linear Regression based Classification and Electoral College voting", "year": 2016, "pdf": 
"http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2016.7862022"}, {"id": "e94168c35be1d4b4d2aaf42ef892e64a3874ed8c", "title": "Two-Dimensional Maximum Margin Feature Extraction for Face Recognition", "year": 2009, "pdf": "https://doi.org/10.1109/TSMCB.2008.2010715"}, {"id": "6a3100af83e98c76c2bfe26ef61bffd289f64862", "title": "A novel semi-supervised learning for face recognition", "year": 2015, "pdf": "https://doi.org/10.1016/j.neucom.2014.11.018"}, {"id": "fe593f4e59a5b0663fd10aa359c09ff385b5405c", "title": "Hierarchical ensemble of Gabor Fisher classifier for face recognition", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613003"}, {"id": "31d007eda3aca9c5114559ddd1f80c5275f3a84f", "title": "A Suitable Adaptive Illumination Compensation Method for Face Detection", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/31d0/07eda3aca9c5114559ddd1f80c5275f3a84f.pdf"}, {"id": "ab9a9f42b4492284e7305f273ce9d7fd8c14d605", "title": "Face recognition based on sparse representation and error correction SVM", "year": 2012, "pdf": "https://doi.org/10.1109/IJCNN.2012.6252426"}, {"id": "88f2952535df5859c8f60026f08b71976f8e19ec", "title": "A neural network framework for face recognition by elastic bunch graph matching", "year": "", "pdf": "http://pdfs.semanticscholar.org/88f2/952535df5859c8f60026f08b71976f8e19ec.pdf"}, {"id": "c8673edad31a9f991f6c446c057e03e130611a7b", "title": "Multi-Task Pose-Invariant Face Recognition", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7006757"}, {"id": "d8bf32e0936bae857cbca669f3ebf21455cf9d10", "title": "Face recognition using FLDA with single training image per person", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/d8bf/32e0936bae857cbca669f3ebf21455cf9d10.pdf"}, {"id": "6c9e12c7ac10b6202762b9cd7ffae3822c90c063", "title": "Deployment of Customized Deep Learning based Video Analytics On Surveillance Cameras.", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10604.pdf"}, {"id": "c46759303c4922d6319faed14462751ec689752f", "title": "Approximately symmetrical face images for image preprocessing in face recognition and sparse representation based classification", "year": "2016", "pdf": "http://doi.org/10.1016/j.patcog.2015.12.017"}, {"id": "5bcce11db01aced72e17a4b7ec8633088dc46e72", "title": "VULNERABILITIES AND ATTACK PROTECTION IN SECURITY SYSTEMS BASED ON BIOMETRIC RECOGNITION \u2013TESIS DOCTORAL\u2013 VULNERABILIDADES Y PROTECCIO\u0301N FRENTE A ATAQUES EN SISTEMAS DE SEGURIDAD BASADOS EN RECONOCIMIENTO BIOME\u0301TRICO", "year": "2009", "pdf": "https://pdfs.semanticscholar.org/a323/9de6f4c300b135d5c417890ab68be8e90801.pdf"}, {"id": "1f47a13548317602ec76eafbea44d7b39926c4cd", "title": "Stacking-Based Deep Neural Network: Deep Analytic Network for Pattern Classification", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.07184.pdf"}, {"id": "597fd0ba70a6ff2f414b76822a204d6a46334b2c", "title": "FINGERPRINT AND FACE IMAGE RECOGNITION", "year": "2011", "pdf": null}, {"id": "fafa7bbd6b37dc97237155654e1a4d1f1aba70f8", "title": "Radial Basis Function Neuroscaling Algorithms for Efficient Facial Image Recognition", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/fafa/7bbd6b37dc97237155654e1a4d1f1aba70f8.pdf"}, {"id": "fb6ebea810ebddc0c3e598e929369e395e012da5", "title": "A Wavelet Domain Implementation of Sparse Representation Method for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8623943"}, {"id": 
"c27b1b92aa616d09befdb525e13e10bb959267bd", "title": "A manifold ranking based method using hybrid features for crime scene shoeprint retrieval", "year": "2016", "pdf": "http://doi.org/10.1007/s11042-016-4029-3"}, {"id": "e37a1c3590bbc670210b77ad44772c852d483672", "title": "General type-2 fuzzy edge detector applied on face recognition system using neural networks", "year": 2016, "pdf": "https://doi.org/10.1109/FUZZ-IEEE.2016.7737983"}, {"id": "f6a050314881488d0f6653c0c6883937a722eff5", "title": "Retina Recognition Based on Fractal Dimension", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/f6a0/50314881488d0f6653c0c6883937a722eff5.pdf"}, {"id": "5479da1038a530beb760a38dbb5b08947dfaefbd", "title": "Fusing continuous spectral images for face recognition under indoor and outdoor illuminants", "year": 2008, "pdf": "http://imaging.utk.edu/publications/papers/2010/mva_hc.pdf"}, {"id": "8e94ed0d7606408a0833e69c3185d6dcbe22bbbe", "title": "For your eyes only", "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2012.6163013"}, {"id": "0318c3a969d714581fee93324f5fbe6f5ce685d6", "title": "FRVT 2006 and ICE 2006 Large-Scale Experimental Results", "year": 2010, "pdf": "http://www3.nd.edu/~kwb/PhillipsEtAlPAMI_2009.pdf"}, {"id": "ef230e3df720abf2983ba6b347c9d46283e4b690", "title": "QUIS-CAMPI: an annotated multi-biometrics data feed from surveillance scenarios", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef23/0e3df720abf2983ba6b347c9d46283e4b690.pdf"}, {"id": "d2f2abd945cbaebdfe570f3a4cb082eccd513b0f", "title": "Multi-directional two-dimensional PCA with matching score level fusion for face recognition", "year": 2012, "pdf": "https://doi.org/10.1007/s00521-012-0851-3"}, {"id": "afaa607aa9ad0e9dad0ce2fe5b031eb4e525cbd8", "title": "Towards an automatic face indexing system for actor-based video services in an IPTV environment", "year": 2010, "pdf": "http://dspace.kaist.ac.kr/bitstream/10203/24882/1/61.pdf"}, {"id": "256ef291cd320d39163d3b438010256a3c40c40c", "title": "Multi-feature multi-manifold learning for single-sample face recognition", "year": 2014, "pdf": "https://doi.org/10.1016/j.neucom.2014.06.012"}, {"id": "f2c02911297245fbb31cafd6bb6cbbd51af5a32d", "title": "Robust score normalization for relational approaches to face authentication", "year": "2004", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7079857"}, {"id": "1a47f12a2490f6775c0ad863ac856de27f5b3e03", "title": "An \u21132/\u21131 regularization framework for diverse learning tasks", "year": 2015, "pdf": "https://doi.org/10.1016/j.sigpro.2014.11.010"}, {"id": "1fb1c935595016ff73cfde20a69d8c04475a537c", "title": "Color component feature selection in feature-level fusion based color face recognition", "year": 2010, "pdf": "http://www.researchgate.net/profile/Konstantinos_Plataniotis/publication/221359072_Color_component_feature_selection_in_feature-level_fusion_based_color_face_recognition/links/09e4150c09dc8dd82e000000.pdf"}, {"id": "8e351b3a0beaf62d8e1f8f879451f06577f9d04f", "title": "Contourlet-Based Feature Extraction with PCA for Face Recognition", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AHS.2008.11"}, {"id": "25c304868ad936a73fead95d9b17e7b46025764d", "title": "A Rank-One Update Algorithm for Fast Solving Kernel Foley–Sammon Optimal Discriminant Vectors", "year": 2010, "pdf": "https://pdfs.semanticscholar.org/25c3/04868ad936a73fead95d9b17e7b46025764d.pdf"}, {"id": "787cdc730b35e380f0d33aa1b3428f3b3b1666e3", "title": "Study on Color Spaces for Single 
Image Enrolment Face Authentication", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597144"}, {"id": "67ec24bf3d227ef11845db4743d3da7f3a5a8f5b", "title": "A New Hypothesis on Facial Beauty Perception", "year": 2014, "pdf": "http://doi.acm.org/10.1145/2622655"}, {"id": "ae8e0da37ca76063c74fefd1bade3a2ad3e6ecfc", "title": "Car recognition based on back lights and rear view features", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WIAMIS.2009.5031451"}, {"id": "166e5efacf632f18de3476cc5a942a20d017e68a", "title": "The eye says it all: Periocular region methodologies", "year": 2012, "pdf": null}, {"id": "22b4bd52b99cf08c188ead1e5aec87e242f095dd", "title": "Pose VariantFace Recognition Based on Linear Mapping", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3193074"}, {"id": "079a0b0db150b6bed24e06e3ad00a73c9fbe00a8", "title": "Convolutional neural network acceleration with hardware/software co-design", "year": "2017", "pdf": "http://doi.org/10.1007/s10489-017-1007-z"}, {"id": "fefd9778ef0c23b9e59c2a4748dcb98c827168f4", "title": "Semantic Pixel Sets Based Local Binary Patterns for Face Recognition", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/fefd/9778ef0c23b9e59c2a4748dcb98c827168f4.pdf"}, {"id": "c5e089ea32790a16a30b986f6d5c9583a346e143", "title": "High-Order Circular Derivative Pattern for Image Representation and Recognition", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595958"}, {"id": "50c77ccf2bd49b60a268081bbbefa30e4d46f257", "title": "Hand shape identification using palmprint alignment based on intrinsic local affine-invariant fiducial points", "year": 2014, "pdf": null}, {"id": "ae8240095c9cca2c395f173fece2f46277b94929", "title": "Weighted contourlet binary patterns and image-based fisher linear discriminant for face recognition", "year": 2017, "pdf": "https://doi.org/10.1016/j.neucom.2017.06.045"}, {"id": "c61743d4230bfda7e4497046fba8b132ed221d23", "title": "Biometric authentication using augmented face and random projection", "year": 2009, "pdf": "https://www.researchgate.net/profile/Hosik_Sohn2/publication/224083398_Biometric_authentication_using_augmented_face_and_random_projection/links/0fcfd50b88404b2674000000.pdf"}, {"id": "d723b1242e45b041d254bb85e5f69c0d96ca7daf", "title": "Robust face recognition using 2D and 3D data: Pose and illumination compensation", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/d723/b1242e45b041d254bb85e5f69c0d96ca7daf.pdf"}, {"id": "5454a2d207adccd04e4c3b4f38520ccac8a21d7b", "title": "Face recognition with Local Gradient Derivative Patterns", "year": 2010, "pdf": null}, {"id": "28312c3a47c1be3a67365700744d3d6665b86f22", "title": "Face Recognition: A Literature Survey1", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf"}, {"id": "9b0c311c32524d8a0bb6dbe2a96b7a7b2fe2dd21", "title": "Combined AdaBoost and gradientfaces for face detection under illumination problems", "year": 2012, "pdf": "https://doi.org/10.1109/ICSMC.2012.6378094"}, {"id": "3c4106f2c670362f620b33ad7715ab6fd3eb2458", "title": "Upscaling faces for recognition systems using trained filters", "year": 2009, "pdf": null}, {"id": "2d039aa3c8d64ca6054c96a49d250e18caf62b82", "title": "A THREE-LAYER SYSTEM FOR IMAGE RETRIEVAL", "year": "2007", "pdf": null}, {"id": "bbf49e0dc67663b2d116eebdae93abb0f276ac8a", "title": "Face hallucination based on morphological component analysis", "year": 2013, "pdf": 
"http://pdfs.semanticscholar.org/bbf4/9e0dc67663b2d116eebdae93abb0f276ac8a.pdf"}, {"id": "9cb96a3f7895bb36d459acbeb65ccea3970e0634", "title": "Face Recognition by Exploring Information Jointly in Space, Scale and Orientation", "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2010.2060207"}, {"id": "083a2bc86e0984968b06593ba06654277b252f00", "title": "Neural evidence for the contribution of holistic processing but not attention allocation to the other-race effect on face memory.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/083a/2bc86e0984968b06593ba06654277b252f00.pdf"}, {"id": "0cf9f33546917e2d0edec03037f1a084d75f917b", "title": "Variable length dominant Gabor local binary pattern (VLD-GLBP) for face recognition", "year": 2014, "pdf": "https://doi.org/10.1109/VCIP.2014.7051511"}, {"id": "17393e698363709a864a2842ab77eaabc61cd386", "title": "A learning approach for single-frame face super-resolution", "year": 2009, "pdf": "https://doi.org/10.1109/ISCAS.2009.5117862"}, {"id": "01d94d568447075d9f32f6c1e7af9255188a6938", "title": "A biometric database with rotating head videos and hand-drawn face sketches", "year": 2009, "pdf": "http://www.csee.usf.edu/~scanavan/papers/BTAS09.pdf"}, {"id": "3d264c9426fc52aff381f9ecbaf36bf62bebe3c9", "title": "A Multiple Maximum Scatter Difference Discriminant Criterion for Facial Feature Extraction", "year": 2007, "pdf": "https://doi.org/10.1109/TSMCB.2007.906579"}, {"id": "5e1e618275155d8d2bc1a2a21eb7c139992d58f5", "title": "Box-constrained Discriminant Projective Non-negative Matrix Factorization through Augmented Lagrangian Multiplier Method", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489373"}, {"id": "92b7fb7c376f3e255e3292a6e9c3c4c9067b9ef6", "title": "Multi-Frame Super-Resolution for Face Recognition", "year": "2007", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401949"}, {"id": "3ea11e30c2a67cc28d007e5a09f62181e363cdca", "title": "An Ensemble Approach to Robust Biometrics Fusion", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.26"}, {"id": "e352288274f62be6abc37c944120bdd4979dc250", "title": "Face recognition using decimated redundant discrete wavelet transforms", "year": 2011, "pdf": "https://doi.org/10.1007/s00138-011-0331-2"}, {"id": "f66e2d403774d3fe49e8c37f8acecb4e43d1675d", "title": "Confidence Measure Using Composite Features for Eye Detection in a Face Recognition System", "year": 2015, "pdf": "https://doi.org/10.1109/LSP.2014.2335198"}, {"id": "aa5a78afb6e66f95a72b97a01ed1564de9669ccb", "title": "A performance driven methodology for cancelable face templates generation", "year": 2010, "pdf": "https://doi.org/10.1016/j.patcog.2010.02.001"}, {"id": "cebf73d590e0c0021f09bdbd59778bd574e96da7", "title": "First Impressions and the Reference Encounter : The In fl uence of Affect and Clothing on Librarian Approachability", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cebf/73d590e0c0021f09bdbd59778bd574e96da7.pdf"}, {"id": "3af6487da9a054f59beda393ae85718a1c5b200d", "title": "Boosted Test-FDA: a transductive boosting method", "year": "2018", "pdf": "http://doi.org/10.1007/s10044-018-0710-7"}, {"id": "f405ec7999402e1765a9848ea5e22dd3e2b5d570", "title": "Feature Extraction Using Laplacian Maximum Margin Criterion", "year": 2010, "pdf": "https://doi.org/10.1007/s11063-010-9167-4"}, {"id": "29bcf87f48a5b4e2f06f20761cbc376d56df5f13", "title": "Face Recognition using PCA and SVM with Surf Technique", "year": "2015", "pdf": 
"https://pdfs.semanticscholar.org/c874/90ab64e031cd9dcdb789d6426dd7ded5422f.pdf"}, {"id": "1ffa068439d779667afe40e9ec8d1560e15ab873", "title": "Improved Bayesian Approach for Face Recognition", "year": 2005, "pdf": null}, {"id": "3c78a53fd212b80ac7e61e7713d39e458abbb026", "title": "Partial Least Squares on Graphical Processor for Efficient Pattern Recognition", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/3c78/a53fd212b80ac7e61e7713d39e458abbb026.pdf"}, {"id": "a93ecf7b9780989c709714dde0f93f4d81eea640", "title": "Unconstrained Face Recognition Using SVM Across Blurred And Illuminated Images With Pose Variation", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a93e/cf7b9780989c709714dde0f93f4d81eea640.pdf"}, {"id": "d2a79aa924ae81034d9e1a9ceb9e3004ff3922f6", "title": "Collaborative representation with reduced residual for face recognition", "year": 2014, "pdf": "https://doi.org/10.1007/s00521-014-1665-2"}, {"id": "e054291cbf250592d032722b99c414c2e4741c03", "title": "A Fast Fixed-Point Algorithm for Two-Class Discriminative Feature Extraction", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/e054/291cbf250592d032722b99c414c2e4741c03.pdf"}, {"id": "8b5af4bf220847d1237d97649c86dd76d43166fa", "title": "Fusion based feature vector for gender classification", "year": 2014, "pdf": "https://doi.org/10.1109/SIU.2014.6830453"}, {"id": "ed3f32f4a6bc3524595db621fa5f6c61cf4b7497", "title": "Gabor-HOG Features based Face Recognition Scheme", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f0e0/a86075011860030f567e62a45743c1a5f5ab.pdf"}, {"id": "641f0989b87bf7db67a64900dcc9568767b7b50f", "title": "Reconstructing faces from their signatures using RBF regression", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/e25a/6836e5f5dc6cf691cd9c42224c0f7f4bb42c.pdf"}, {"id": "0b440695c822a8e35184fb2f60dcdaa8a6de84ae", "title": "KinectFaceDB: A Kinect Database for Face Recognition", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883"}, {"id": "f4003cbbff3b3d008aa64c76fed163c10d9c68bd", "title": "Compass local binary patterns for gender recognition of facial photographs and sketches", "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2016.08.055"}, {"id": "86ff576c2fa8d4acbce75093459f0bf6c915c7db", "title": "Automated annotation of human faces in family albums", "year": 2003, "pdf": "http://nichol.as/papers/Zhang/Automated%20annotation%20of%20human%20faces%20in%20family0.pdf"}, {"id": "84ba2ff2a4d75e46bfe39c18ad075f4972c3ed73", "title": "Gabor volume based local binary pattern for face representation and recognition", "year": "2008", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813354"}, {"id": "2c5c89103605c6f0ed8924778526133dfa064a16", "title": "Blurred face recognition algorithm guided by a no-reference blur metric", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2c5c/89103605c6f0ed8924778526133dfa064a16.pdf"}, {"id": "08c1f8f0e69c0e2692a2d51040ef6364fb263a40", "title": "Beyond Eigenfaces: Probabilistic Matching for Face Recognition", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/0b20/0cf032430d74fd612601cc59d5af5608ceb4.pdf"}, {"id": "599a84997acba7d31fdc139867097858b4a08b1b", "title": "Evolutionary granular approach for recognizing faces altered due to plastic surgery", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771337"}, {"id": "7d4b699a676482b76f2fa0ce7a2a75316616fb1d", "title": "Local Steerable Phase (LSP) Feature for Face Representation and Recognition", 
"year": 2006, "pdf": null}, {"id": "439ec47725ae4a3660e509d32828599a495559bf", "title": "Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation and Evaluation", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/439e/c47725ae4a3660e509d32828599a495559bf.pdf"}, {"id": "b5dd744df6de73bd072b18d9108b79431a28c539", "title": "Gait Recognition With Shifted Energy Image and Structural Feature Extraction", "year": 2012, "pdf": "https://doi.org/10.1109/TIP.2011.2180914"}, {"id": "fdfdf08ed0d6e5f656ce25c99b02d5424fe4640d", "title": "A Framework for Combining Statistical and Structural Pattern Retrieval Based on Feature Histograms", "year": 2007, "pdf": null}, {"id": "c1b90cf91837628c430a796e7b6be6d8c010cc43", "title": "Local Steerable Pyramid Binary Pattern Sequence LSPBPS for face recognition method", "year": "2009", "pdf": "https://pdfs.semanticscholar.org/0553/ea780eac70eb10f1022cd1886d0ee5a7c0c4.pdf"}, {"id": "0cede3b170369d53181d5b67c3baaec0e2bbfee4", "title": "Prediction of eigenvalues and regularization of eigenfeatures for human face verification", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/0ced/e3b170369d53181d5b67c3baaec0e2bbfee4.pdf"}, {"id": "79150c5938946a8dcb39d32f0c47f99cf6746679", "title": "Face Recognition by Discriminant Analysis with Gabor Tensor Representation", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7915/0c5938946a8dcb39d32f0c47f99cf6746679.pdf"}, {"id": "0c286be42e734c2469563e189d7a8b11155386d5", "title": "ABSTRACT Title of Dissertation: GAIT AS A BIOMETRIC FOR PERSON IDENTIFICATION IN VIDEO", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/0c28/6be42e734c2469563e189d7a8b11155386d5.pdf"}, {"id": "2ed6f30a59d7d4ebaa69b3e61c62263d37a501ae", "title": "Recognizing Imprecisely Localized, Partially Occluded and Expression Variant Faces from a Single Sample per Class", "year": "", "pdf": "http://pdfs.semanticscholar.org/2ed6/f30a59d7d4ebaa69b3e61c62263d37a501ae.pdf"}, {"id": "f6b8f8511f8a66209ff2e7f163f9265ac077ba3e", "title": "Face recognition with lattice independent component analysis and extreme learning machines", "year": "2012", "pdf": "http://doi.org/10.1007/s00500-012-0826-4"}, {"id": "08169c3e0179aa71cf5dfb9f048392916433b3e5", "title": "Asymmetrically boosted HMM for speech reading", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2004.37"}, {"id": "96c2fbea5a64c71d620f8acde51eb233d83d071d", "title": "Single Sample Face Recognition: Discriminant Scaled Space vs Sparse Representation-Based Classification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8566424"}, {"id": "a27564276abb3c347b76b32396f897171cb5893f", "title": "A novel dynamic rough subspace based selective ensemble", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/a275/64276abb3c347b76b32396f897171cb5893f.pdf"}, {"id": "26b9d546a4e64c1d759c67cd134120f98a43c2a6", "title": "Polynomial Correlation Filters for Human Face Recognition", "year": 2012, "pdf": "https://doi.org/10.1109/ICMLA.2012.120"}, {"id": "024b5eb07d904cdb5c6782630622b7b5a5ab914a", "title": "Patch-Based Gabor Fisher Classifier for Face Recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.917"}, {"id": "9a1246da9730ddda1f982a668816b501791a2a64", "title": "Bionic Face Recognition Using Gabor Transformation", "year": 2010, "pdf": "https://doi.org/10.1142/S021800141100866X"}, {"id": "6aa9cb81b8408f2cec9c7bff267938c1b28b1b88", "title": "Using a multiclass novelty classifier for face recognition", 
"year": 2014, "pdf": null}, {"id": "ef3ef27557fa7ba64ab1f4d89dbdac86c55b760c", "title": "Face Verification System based on Integral Normalized Gradient Image(INGI)", "year": "2013", "pdf": "https://arxiv.org/pdf/1401.6112.pdf"}, {"id": "a805c753d65a8b4f56d01af363b61345c5d29a13", "title": "Multiscale Fusion of Visible and Thermal IR Images for Illumination-Invariant Face Recognition", "year": 2006, "pdf": "https://www.imaging.utk.edu/~koschan/paper/IJCV07_sk.pdf"}, {"id": "5ba51674897afa2a1bc0646fddada7510bd9c0ce", "title": "Video-based face recognition evaluation in the CHIL project - Run 1", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.110"}, {"id": "1549771a3d7ddb3ce2b65e4685f617a0ecfc9823", "title": "Boosted of Haar-like Features and Local Binary Pattern Based Face Detection", "year": 2009, "pdf": "https://doi.org/10.1109/RIVF.2009.5174627"}, {"id": "404776aa18031828f3d5dbceed39907f038a47fe", "title": "Sparsely encoded local descriptor for face verification", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4047/76aa18031828f3d5dbceed39907f038a47fe.pdf"}, {"id": "9fb1d7cbf1baf5f347d159410d22912fcee1fdb1", "title": "Face Detection Using Ferns", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/9fb1/d7cbf1baf5f347d159410d22912fcee1fdb1.pdf"}, {"id": "ddd23ef5c1b25f77ea4e22ebd421df7a58a71818", "title": "Derivative code and its pattern for object recognition", "year": 2012, "pdf": null}, {"id": "33ec047f1084e290c8a6f516bc75345b6bcf02a0", "title": "Smart cameras: 2D affine models for determining subject facial expressions", "year": 2010, "pdf": "https://doi.org/10.1109/TCE.2010.5505930"}, {"id": "3957b51b44f8727fe008162ea8a142a8c7917dea", "title": "Fully automatic pose-invariant face recognition via 3D pose normalization", "year": 2011, "pdf": "https://www.researchgate.net/profile/Michael_Jones20/publication/221110585_Fully_automatic_pose-invariant_face_recognition_via_3D_pose_normalization/links/02bfe50d33c1c4580c000000.pdf"}, {"id": "928b6bc4492740e8eba83366e9b21eb47ba59067", "title": "Recognizing human faces under varying degree of Illumination: A comprehensive survey", "year": 2015, "pdf": null}, {"id": "749bb1ab22c792dc5c7782714df74c392939c8d1", "title": "Human gait recognition based on matching of body components", "year": 2007, "pdf": "https://doi.org/10.1016/j.patcog.2006.11.012"}, {"id": "5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0", "title": "Reference Face Graph for Face Recognition", "year": 2014, "pdf": "http://www.cs.ucr.edu/~mkafai/papers/Paper_tifs2014.pdf"}, {"id": "47cd38706e81ba6d73a9c56ebc9e42327a0a28cb", "title": "Computing the Principal Local Binary Patterns for face recognition using data mining tools", "year": 2012, "pdf": "https://doi.org/10.1016/j.eswa.2012.01.074"}, {"id": "bf012c86da23a52c37689b7e34fa50b25287ff57", "title": "A novel face recognition method based on Local Zernike Moments", "year": 2014, "pdf": "https://doi.org/10.1109/SIU.2014.6830463"}, {"id": "84eb543a649f7331403caef6aaf96177c6cf5571", "title": "Automatic Rank Determination in Projective Nonnegative Matrix Factorization", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/84eb/543a649f7331403caef6aaf96177c6cf5571.pdf"}, {"id": "15c9e9b495f255f7b435656b0282b2fa8eba3aed", "title": "Discriminant structure embedding for image recognition", "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2015.09.071"}, {"id": "7a9cd340fc4f81469bb379f74f39a90a7c8c2534", "title": "Local feature learning for face recognition under varying poses", "year": 2015, "pdf": 
"https://doi.org/10.1109/ICIP.2015.7351334"}, {"id": "1672becb287ae3eaece3e216ba37677ed045db55", "title": "Fully automatic face normalization and single sample face recognition in unconstrained environments", "year": 2016, "pdf": "https://doi.org/10.1016/j.eswa.2015.10.047"}, {"id": "64378b2694db5a4d6cdcab831822a27df3b2765a", "title": "Learning Effective Intrinsic Features to Boost 3D-Based Face Recognition", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/6437/8b2694db5a4d6cdcab831822a27df3b2765a.pdf"}, {"id": "3ee4076041fe2412d50e84d6778a974d997d8660", "title": "Face Recognition Based on Optimal Kernel Minimax Probability Machine", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/3ee4/076041fe2412d50e84d6778a974d997d8660.pdf"}, {"id": "94798668ce20e71aed77430a8aae08c760e71b53", "title": "Pose invariant face recognition based on hybrid-global linear regression", "year": 2010, "pdf": "https://doi.org/10.1007/s00521-010-0359-7"}, {"id": "443cd4e2f2f8a52fd090ae056352155d872a5b06", "title": "Performance enhancement of local vector pattern with generalized distance local binary pattern for face recognition", "year": 2015, "pdf": null}, {"id": "24d99aea3edb618067c7308cc2261d53f8dd18d1", "title": "Sign Language Recognition from Homography", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4036628"}, {"id": "f5ebdf848234729984489c97c6d255190b5bd345", "title": "Breaking the Limitation of Manifold Analysis for Super-Resolution of Facial Images", "year": 2007, "pdf": "https://doi.org/10.1109/ICASSP.2007.365972"}, {"id": "9c1325d24e72861ad35c6ba3d066ae9919c10876", "title": "(2D)2PCALDA: An efficient approach for face recognition", "year": 2009, "pdf": "https://doi.org/10.1016/j.amc.2009.03.014"}, {"id": "04a87fbfa63f642f9c7326dd6cbc8b58b41b6841", "title": "Recognition of sign language subwords based on boosted hidden Markov models", "year": 2005, "pdf": "http://doi.acm.org/10.1145/1088463.1088511"}, {"id": "0e26dfa11e2e56597a20985e3225fcd3c9192dfa", "title": "Multi-modality imagery database for plant phenotyping", "year": 2015, "pdf": "https://doi.org/10.1007/s00138-015-0734-6"}, {"id": "b372432ccd4c9cf169b1eee2adadae074eb3a3fd", "title": "Hallucinating face by position-patch", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/b372/432ccd4c9cf169b1eee2adadae074eb3a3fd.pdf"}, {"id": "d93fd354b469190ab73826f416d58840e8efa13c", "title": "Face recognition using Multi-modal Binary Patterns", "year": 2012, "pdf": "http://ieeexplore.ieee.org/document/6460635/"}, {"id": "f4b99820202f4b9fb1cbdc77c25eb446f085d6c2", "title": "On Random Transformations for Changeable Face Verification", "year": 2011, "pdf": "https://doi.org/10.1109/TSMCB.2010.2098439"}, {"id": "a6148dcfb0373d07e08bdecb8e32f127cf5c39ad", "title": "Facial image classification based on age and gender", "year": 2013, "pdf": null}, {"id": "0bddd3a4f45ef68faeb96cee89ff763e8e497af1", "title": "Regularization Method for Solving Denoising and Inpainting Task Using Stacked Sparse Denoising Autoencoders", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0bdd/d3a4f45ef68faeb96cee89ff763e8e497af1.pdf"}, {"id": "c63e609f75a08bbb341909e1022411eba0425c52", "title": "Computer Vision \u2013 ECCV 2016 Workshops", "year": "2016", "pdf": "http://doi.org/10.1007/978-3-319-46604-0"}, {"id": "61d20ada590e7627351d19565c93c6bed012b5ec", "title": "Practical pose normalizaiton for pose-invariant face recognition", "year": 2015, "pdf": "https://doi.org/10.1109/ACPR.2015.7486477"}, {"id": 
"52e3014d81681bbaf92990da82459c0ca3d6df02", "title": "Feature extraction based on fuzzy 2DLDA", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/52e3/014d81681bbaf92990da82459c0ca3d6df02.pdf"}, {"id": "55c3b8f21b063b605a01a720af92c0a5de59333a", "title": "A Comparison of Pixel, Edge andWavelet Features for Face Detection using a Semi-Naive Bayesian Classifier", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.50"}, {"id": "f355e54ca94a2d8bbc598e06e414a876eb62ef99", "title": "A survey on heterogeneous face recognition: Sketch, infra-red, 3D and low-resolution", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f355/e54ca94a2d8bbc598e06e414a876eb62ef99.pdf"}, {"id": "e6023b72351b839fef05b4049ec16374fb083f88", "title": "Safety, deserve neither Liberty nor Safety.", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/e602/3b72351b839fef05b4049ec16374fb083f88.pdf"}, {"id": "33a9076d5d48208960feebff9d5efdaa2203f872", "title": "Face De-Identification", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/f437/b51fd53fd6e0fd327daaa0fbc5f3ee9a1da0.pdf"}, {"id": "85beedeb0b619ca411289b8839f09b9ffe8b9eb9", "title": "Better than best: matching score based face registration", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/85be/edeb0b619ca411289b8839f09b9ffe8b9eb9.pdf"}, {"id": "0b635b58cd4d739bed415f77de8d1ec3d79e26d4", "title": "Stan Z . Li Anil K . Jain ( Eds . ) Handbook of Face Recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/0b63/5b58cd4d739bed415f77de8d1ec3d79e26d4.pdf"}, {"id": "ee7c9e3d417df6234d64364c6922b12433e0de38", "title": "Face Detection and Gesture Recognition for Human-Computer Interaction", "year": "2001", "pdf": "http://doi.org/10.1007/978-1-4615-1423-7"}, {"id": "c6643a771521b19ffc0df035e806b03c2ec00782", "title": "Incorporating invariants in Mahalanobis distance based classifiers: Application to Face Recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/c664/3a771521b19ffc0df035e806b03c2ec00782.pdf"}, {"id": "92c2dd6b3ac9227fce0a960093ca30678bceb364", "title": "On Color Texture Normalization for Active Appearance Models", "year": 2009, "pdf": "https://aran.library.nuigalway.ie/bitstream/handle/10379/1350/On%20color%20texture%20normalization%20for%20active%20appearance%20models.pdf?isAllowed=y&sequence=1"}, {"id": "f3412c087282fcd60f08083515714f97179bedcb", "title": "A study on different experimental configurations for age, race, and gender estimation problems", "year": 2015, "pdf": "https://doi.org/10.1186/s13640-015-0089-y"}, {"id": "ca7be47d26abdc6fd3f229e9827484e91dbd7752", "title": "Adapted Fusion Schemes for Multimodal Biometric Authentication", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/ca7b/e47d26abdc6fd3f229e9827484e91dbd7752.pdf"}, {"id": "8a92d17cff2096336796232e4e42bb11c590629d", "title": "Color Face Recognition Based on Curvelet Transform", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8a92/d17cff2096336796232e4e42bb11c590629d.pdf"}, {"id": "75f715b56972cca50d84ac1bd64529402ef0d57d", "title": "Biometric identification systems", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/75f7/15b56972cca50d84ac1bd64529402ef0d57d.pdf"}, {"id": "d79774d739fa80a07c0e2a60fc2b953b1e8869c3", "title": "Towards practical space-variant based face recognition and authentication", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6914258"}, {"id": "bc9565725be015135142305305a1c85fd54d99f8", "title": "Discriminant projection embedding for face and palmprint 
recognition", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/bc95/65725be015135142305305a1c85fd54d99f8.pdf"}, {"id": "d5ccbc99a89934b5f2dd3f0ba7e028ac9c347b73", "title": "A Correlative Two-Step Approach to Hallucinating Faces", "year": 2014, "pdf": "https://doi.org/10.1142/S0218001414540044"}, {"id": "89670862270d31fe7a02d863be51fbe83324ece2", "title": "Ear Recognition Based on Statistical Shape Model", "year": 2006, "pdf": "http://iitlab.bit.edu.cn/mcislab/~zhaoyoudong/paper_pdf/ear%20recognition.pdf"}, {"id": "4b0081d655e1a4df8988908045780938f1c18630", "title": "Human Identification Using Gait", "year": 2006, "pdf": "http://journals.tubitak.gov.tr/elektrik/issues/elk-06-14-2/elk-14-2-5-0510-1.pdf"}, {"id": "a75243922e0a5df5d1bd7d990fdb612fe9f0b376", "title": "Employing vector quantization in a transform domain for face recognition", "year": 2016, "pdf": "https://doi.org/10.1109/UEMCON.2016.7777823"}, {"id": "9cf3dae2c64f2935b3562364c15eb3b34fe5213d", "title": "From Local Geometry to Global Structure: Learning Latent Subspace for Low-resolution Face Image Recognition", "year": 2015, "pdf": "https://doi.org/10.1109/LSP.2014.2364262"}, {"id": "edec04f49b4e570ec4cc2c25e2f443386a2b274b", "title": "Fusion of multiple gait features for human identification", "year": 2008, "pdf": null}, {"id": "1ff79eba66d838d8c1cc90c22fab251bb7babc42", "title": "A new hybrid face recognition system", "year": 2008, "pdf": null}, {"id": "0407866be9938f24acc44afd6760e27e15e6e160", "title": "Simplest representation yet for gait recognition: averaged silhouette", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333741"}, {"id": "2bb6ab1cb16edea5c71174f8e277d001c0084b58", "title": "Transductive Face Sketch-Photo Synthesis", "year": 2013, "pdf": "https://doi.org/10.1109/TNNLS.2013.2258174"}, {"id": "1abafbb83a617c1a63eeba8796fb49b6778db93d", "title": "Online discriminant projective non-negative matrix factorization", "year": 2017, "pdf": "https://doi.org/10.1109/SPAC.2017.8304336"}, {"id": "289f6e4949cd7a5021f11c138c4328d0de0d485c", "title": "Incremental GMMSD2 with applications to feature extraction", "year": 2014, "pdf": "https://doi.org/10.1109/ISCAS.2014.6865279"}, {"id": "ac1bbc44a0b342bb52d8aa92170b6473a148d130", "title": "Linear principal transformation: toward locating features in N-dimensional image space", "year": 2013, "pdf": "https://doi.org/10.1007/s11042-013-1505-x"}, {"id": "9b2ea0a877206943f8556f01922eb475fba5b4a2", "title": "Biologically Inspired Tensor Features", "year": 2009, "pdf": "https://doi.org/10.1007/s12559-009-9028-5"}, {"id": "221c626da43f973f47e5b6424b439a9a0f6c5052", "title": "A Novel Eye Localization Method With Rotation Invariance", "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2013.2287614"}, {"id": "54be36792f90365609dd800d49acdadade5f142a", "title": "Superpixel-Based Face Sketch–Photo Synthesis", "year": 2017, "pdf": null}, {"id": "a5eccba8e68fe72a4f5f3ab593a2d507a5db7c4c", "title": "Reactive Learning Strategy for AsymBoost Based Face Detectors", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICIAP.2007.106"}, {"id": "eab4046cb0a4f7debecfa55388256fed7f3ee677", "title": "Pose Invariant Color Face Recognition Based on Frequency Analysis and DLDA with Weight Score Classification", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CSIE.2009.743"}, {"id": "83085e407a8ea5045dbcb037baacd91477a0d5bf", "title": "A biosegmentation benchmark for evaluation of bioimage analysis methods", "year": "2008", "pdf": 
"http://doi.org/10.1186/1471-2105-10-368"}, {"id": "0854b445973f5df79978cf4d4b031af696244ffb", "title": "Optimal Weighting of Landmarks for Face Recognition", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/0854/b445973f5df79978cf4d4b031af696244ffb.pdf"}, {"id": "47e486a082a244d8cdaf5bd537ff38cdba3dff76", "title": "An Effective Missing-Data Estimation Approach for Small-Size Image Sequences", "year": 2015, "pdf": "https://doi.org/10.1109/MCI.2015.2437311"}, {"id": "4b5158bdc12b3704fdd5d199f83ace8505be2945", "title": "Histograms of Gabor Ordinal Measures for face representation and recognition", "year": 2012, "pdf": "https://doi.org/10.1109/ICB.2012.6199758"}, {"id": "d4ef263f1483a1c5f86d4ece106e7729e3d9fef6", "title": "Improved face recognition algorithm using extended vector quantization histogram features", "year": 2016, "pdf": null}, {"id": "84eca4d0c6a989d57bc5ce900022d4bd5576361b", "title": "An Analysis of Random Projection for Changeable and Privacy-Preserving Biometric Verification", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5382573"}, {"id": "bbe6cc7adc6b3ee697c673557abbed5d876cdfaa", "title": "Weighted Patches Based Face Super-Resolution Via Adaboost", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8526924"}, {"id": "a281433fab0178d9344fe1e53506187c9c227317", "title": "Low-Rank Linear Embedding for Image Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8356587"}, {"id": "b3be6c26e00671fa8b18587409e656d5bbecdcf7", "title": "Investigation of multimodal template-free biometric techniques and associated exception handling", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/b3be/6c26e00671fa8b18587409e656d5bbecdcf7.pdf"}, {"id": "c791682fa3f716401bca46c6e9a2af495b0d9d51", "title": "Interested in publishing with us ? 
Contact book", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c791/682fa3f716401bca46c6e9a2af495b0d9d51.pdf"}, {"id": "1560480870a19fd3d65024a06c428f42a6c16cf8", "title": "Facial feature detection with optimal pixel reduction SVM", "year": "2008", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813372"}, {"id": "558fd79d8f0d7b05c3db32b8efa0cce4bd5d9970", "title": "Biometrics at the frontiers, assessing the impact on Society Technical impact of Biometrics", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/558f/d79d8f0d7b05c3db32b8efa0cce4bd5d9970.pdf"}, {"id": "fdb0472af94a726897c20b6b181c6d71ee293e71", "title": "Quality assessment of image-based biometric information", "year": 2015, "pdf": "https://doi.org/10.1186/s13640-015-0055-8"}, {"id": "c4c1fb882ae8b48c461e1f7c359ea3ea15da29fa", "title": "Gender classification using bayesian classifier with local binary patch features", "year": 2012, "pdf": null}, {"id": "f7101d092537f1bb92678c74e771209332a0e259", "title": "Regression based automatic face annotation for deformable model building", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/f710/1d092537f1bb92678c74e771209332a0e259.pdf"}, {"id": "87665dd7863ebb3738718758926a43cbe9fee20e", "title": "Representative feature chain for single gallery image face recognition", "year": "2008", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4760975"}, {"id": "7f9f4f2f36fec39b50f745be8852feef110eb25d", "title": "Piecewise Regularized Canonical Correlation Discrimination for Low-Resolution Face Recognition", "year": 2010, "pdf": null}, {"id": "867d3aa95bb6a764ce3a03cfb5e99a81aea4a980", "title": "Computer-based recognition of dysmorphic faces", "year": 2003, "pdf": "https://www.ini.rub.de/upload/file/1470692857_b90a305d2e34aa4f57d8/loos-syndromic.pdf"}, {"id": "c973b7c453012deb689b8d1b6a995f956c6351e1", "title": "Feature extraction based on Laplacian bidirectional maximum margin criterion", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/c973/b7c453012deb689b8d1b6a995f956c6351e1.pdf"}, {"id": "1d6a99e11ed5576bec973a35bc50d196c9a38e26", "title": "Generalized Low Dimensional Feature Subspace for Robust Face Recognition on Unseen datasets using Kernel Correlation Feature Analysis", "year": 2007, "pdf": "https://doi.org/10.1109/ICASSP.2007.366143"}, {"id": "4e6ff8ff80a1610bb841b669bb7667413ed2982f", "title": "Dependence Characteristics of Face Recognition Algorithms", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/4e6f/f8ff80a1610bb841b669bb7667413ed2982f.pdf"}, {"id": "86dd8db0587b570ea2237c03cb0126ab3a53317c", "title": "A Novel Face Detection and Facial Feature Detection Algorithm using Skin Colour and Back Propagation Neural Network", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/86dd/8db0587b570ea2237c03cb0126ab3a53317c.pdf"}, {"id": "9752e9c66d6bf09d07d15726f7a7f086b9b13572", "title": "A Reversible Face De-Identification Method based on Robust Hashing", "year": 2008, "pdf": null}, {"id": "746cbf30a3e8f0563fff86da8c3978e9d490aad9", "title": "L1-Norm-Based 2DPCA", "year": 2010, "pdf": "https://doi.org/10.1109/TSMCB.2009.2035629"}, {"id": "fcbfb2744874fa709d72f6dfe37a1202789b1649", "title": "Discriminative and Compact Coding for Robust Face Recognition", "year": 2015, "pdf": "https://doi.org/10.1109/TCYB.2014.2361770"}, {"id": "891b31be76e2baa83745f24c2e2013851dc83cbb", "title": "Improved Face Representation by Nonuniform Multilevel Selection of Gabor Convolution Features", "year": 2009, "pdf": 
"https://doi.org/10.1109/TSMCB.2009.2018137"}, {"id": "4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d", "title": "A proposed method for the improvement in biometric facial image recognition using document-based classification", "year": "2018", "pdf": "http://doi.org/10.1007/s11227-018-2408-4"}, {"id": "1ed14b85db5963fe122f00f7b3ea60abe05b3604", "title": "Weighted Gabor features in unitary space for face recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.111"}, {"id": "7f26d56d24f49df7e5323c6c1f7fb026c9d6612f", "title": "Two-directional maximum scatter difference discriminant analysis for face recognition", "year": 2008, "pdf": "https://doi.org/10.1016/j.neucom.2008.01.004"}, {"id": "e9e72259ed3db0ffbf7ee9b3efdbf06e992ea1b9", "title": "Using data compression to enhance the security of identification document", "year": 2006, "pdf": "https://doi.org/10.1109/ICASSP.2006.1660483"}, {"id": "96dfa2a7630fc6800ba8a546df526b8174f54b5c", "title": "Appearance-Based Facial Recognition Using Visible and Thermal Imagery : A Comparative Study \u2217", "year": "2001", "pdf": "https://pdfs.semanticscholar.org/96df/a2a7630fc6800ba8a546df526b8174f54b5c.pdf"}, {"id": "4ecb93ffa3c3b195664ee6d627fd6e46f6798e73", "title": "Investigation of local and global features for face detection", "year": 2011, "pdf": "https://doi.org/10.1109/CIMSIVP.2011.5949242"}, {"id": "5d197c8cd34473eb6cde6b65ced1be82a3a1ed14", "title": "A Face Image Database for Evaluating Out-of-Focus Blur", "year": 2008, "pdf": "http://cdn.intechopen.com/pdfs/20590/InTech-A_face_image_database_for_evaluating_out_of_focus_blur.pdf"}, {"id": "0faf441a1ef1e788fb9ccd20484b104a1fa95ee8", "title": "A brief review on techniques for recognizing images under varying poses", "year": 2015, "pdf": null}, {"id": "bff354d05823c83215183c8824faefbc093de011", "title": "A new efficient SVM and its application to real-time accurate eye localization", "year": 2011, "pdf": "https://frvp.njit.edu/images/new-slider/eSVM.pdf"}, {"id": "3d7a5d1fbec861542631fcb10f58e38f4f51a04c", "title": "Face Recognition Application of Blur-Robust", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3d7a/5d1fbec861542631fcb10f58e38f4f51a04c.pdf"}, {"id": "21a9f713a664374bfdcca6f4c8f267b85e63ad7a", "title": "Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/5f98/7116642d704a5263a880d2671a6e8c434d97.pdf"}, {"id": "3958db5769c927cfc2a9e4d1ee33ecfba86fe054", "title": "Describable Visual Attributes for Face Verification and Image Search", "year": 2011, "pdf": "http://homes.cs.washington.edu/~neeraj/base/base/papers/nk_pami2011_faceattrs.pdf"}, {"id": "19f4020db2a37102ec3236ff72d8c7d3a0992ef9", "title": "Face Recognition by Kernel Independent Component Analysis", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/19f4/020db2a37102ec3236ff72d8c7d3a0992ef9.pdf"}, {"id": "cf5bdf52cf269c9f3ebf548756146763f55e42d6", "title": "Title of Thesis : CONTOUR BASED 3 D FACE MODELING FROM MONOCULAR VIDEO", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/cf5b/df52cf269c9f3ebf548756146763f55e42d6.pdf"}, {"id": "59d66790ac1db459ba78ed0147cde7c66e7ed0c5", "title": "Ensemble-based discriminant learning with boosting for face recognition", "year": 2006, "pdf": "http://www.comm.utoronto.ca/~kostas/Publications2008/pub/78.pdf"}, {"id": "05b8673d810fadf888c62b7e6c7185355ffa4121", "title": "A Comprehensive Survey to Face Hallucination", "year": 2013, "pdf": 
"https://nannanwang.github.io/My_Papers/IJCV2013.pdf"}, {"id": "0e4fa61871755b5548a5c970c8103f7b2ada24f3", "title": "Partial Face Recognition Based on Template Matching", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.19"}, {"id": "3ec860cfbd5d953f29c43c4e926d3647e532c8b0", "title": "Gabor-Based Region Covariance Matrices for Face Recognition", "year": 2008, "pdf": "https://doi.org/10.1109/TCSVT.2008.924108"}, {"id": "bbb6491e515f6fe35841af2b88edee09b07b00f5", "title": "Kernel-based nonlinear discriminant analysis for face recognition", "year": 2003, "pdf": "https://doi.org/10.1007/BF02945468"}, {"id": "5e9c8d74e83c9900bb06255656d45eba2eaeb260", "title": "A general framework for transfer sparse subspace learning", "year": 2012, "pdf": "https://doi.org/10.1007/s00521-012-1084-1"}, {"id": "c8012c6d71c286777b7a818c4cadb540d8549b43", "title": "Diagnosis of Esophagitis Based on Face Recognition Techniques", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/c801/2c6d71c286777b7a818c4cadb540d8549b43.pdf"}, {"id": "bc9085fb366afe9444e30e5920f3db7042013262", "title": "Metadata-based understanding of impostor pair score variations", "year": 2014, "pdf": "https://doi.org/10.1109/WIFS.2014.7084295"}, {"id": "eca4aa7238d232b108847edb62a49e38cdf8115f", "title": "Color face recognition based on color image correlation similarity discriminant model", "year": 2013, "pdf": "https://doi.org/10.1007/s11042-013-1638-y"}, {"id": "cb457bcb26c1ef9932b0606722f8e073f3e7f494", "title": "Face recognition using adaptive local directional pattern", "year": 2017, "pdf": "https://doi.org/10.1109/SPAC.2017.8304304"}, {"id": "3abf8e5f1f5778b99890b193de59a3a9031e3691", "title": "Revisiting Linear Discriminant Techniques in Gender Recognition", "year": 2011, "pdf": "http://www.dia.fi.upm.es/~pcr/publications/pami2011.pdf"}, {"id": "4ff20e3d56a985640d4a15dd75f04648050f0938", "title": "A novel two-stage weak classifier selection approach for adaptive boosting for cascade face detector", "year": 2013, "pdf": "https://doi.org/10.1016/j.neucom.2011.12.060"}, {"id": "ee87aa52d9642607d86f011c0d7326c4bdc63121", "title": "Automatic Detection of Facial Midline as a Guide for Facial Feature Extraction", "year": "2007", "pdf": "https://pdfs.semanticscholar.org/ee87/aa52d9642607d86f011c0d7326c4bdc63121.pdf"}, {"id": "24c442ac3f6802296d71b1a1914b5d44e48b4f29", "title": "Pose and Expression-Coherent Face Recovery in the Wild", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.117"}, {"id": "5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6", "title": "A Methodology for Detecting Faces from Different Views", "year": 2012, "pdf": "https://doi.org/10.1109/ICTAI.2012.40"}, {"id": "98a60b218ff8addaf213e97e2f4b54d39e45f5b9", "title": "Benchmarking Real World Object Recognition", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/98a6/0b218ff8addaf213e97e2f4b54d39e45f5b9.pdf"}, {"id": "88502625b546607f4992085a313dab1ceb68e4d7", "title": "A New Technique using Cubic Curves and Hamming Distance to Detect Human Emotions", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/8850/2625b546607f4992085a313dab1ceb68e4d7.pdf"}, {"id": "2ee7bf7d0466fca7b6968d5652ed924cd9e86e38", "title": "Performance evaluation of road detection and following systems", "year": 2002, "pdf": "https://doi.org/10.1117/12.580103"}, {"id": "e62519d3041cb445d33f144f8adbe59ca201978e", "title": "A Novel Encoding Scheme for Effective Biometric Discretization: Linearly Separable Subcode", "year": 2013, "pdf": 
"http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.122"}, {"id": "a0ebe46668f15092c23dafce65d85319461e95c8", "title": "Parts-based Object Detection Using Multiple Views", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/a0eb/e46668f15092c23dafce65d85319461e95c8.pdf"}, {"id": "852f35acbf6cb3cd0b7036ef7a3ee7b46bc78922", "title": "Heterogeneous Face Recognition by Margin-Based Cross-Modality Metric Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7959077"}, {"id": "5ab2791fbd8d39d02eb8ee76b0f03f5d64371309", "title": "A Methodology for Empirical Performance Evaluationof Page Segmentation AlgorithmsSong", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/5ab2/791fbd8d39d02eb8ee76b0f03f5d64371309.pdf"}, {"id": "0d6008f2b2e198e9eac44e8ad68e590cf6b41c57", "title": "Human and chimpanzee face recognition in chimpanzees (Pan troglodytes): role of exposure and impact on categorical perception.", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/0d60/08f2b2e198e9eac44e8ad68e590cf6b41c57.pdf"}, {"id": "1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6", "title": "Demographic Estimation from Face Images: Human vs. Machine Performance", "year": 2015, "pdf": "http://www.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf"}, {"id": "cb3ab7ca2cda0a784c0a94ad2bdff2bcbc376afe", "title": "A Shunting Inhibitory Convolutional Neural Network for Gender Classification", "year": 2006, "pdf": "http://ro.uow.edu.au/cgi/viewcontent.cgi?article=1453&context=infopapers"}, {"id": "9a5292289b781bc7bb50bbe8a6f013b40c084880", "title": "Effects of oxytocin on behavioral and ERP measures of recognition memory for own-race and other-race faces in women and men.", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9a52/92289b781bc7bb50bbe8a6f013b40c084880.pdf"}, {"id": "3a39f3cd10e2078d750cdc826e8fd203c9beda9e", "title": "Unified Sparse Subspace Learning via Self-Contained Regression", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7962190"}, {"id": "2cd0da17f7d20f01a58a22880f24d34cfddbca7c", "title": "DAML: Domain Adaptation Metric Learning", "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2011.2134107"}, {"id": "8010636454316faf1a09202542af040ffd04fefa", "title": "Performance Parameter Analysis of Face Recognition Based On Fuzzy C-Means Clustering , Shape and Corner Detection", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/8010/636454316faf1a09202542af040ffd04fefa.pdf"}, {"id": "dabf0d227e724bbcbc44d959e0ac0724a564a128", "title": "Fast and robust self-training beard/moustache detection and segmentation", "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139066"}, {"id": "17d84ca10607442a405f3c4c8b4572bdd79801c2", "title": "Expression robust 3D face recognition via mesh-based histograms of multiple order surface differential quantities", "year": 2011, "pdf": "http://math.univ-lyon1.fr/~morvan/SCANPUBLICATIONS/Huibinicip2011.pdf"}, {"id": "69602cdcf1ad437679a0ed6b8a15ec94e28a8557", "title": "A New Discriminative Sparse Representation Method for Robust Face Recognition via $l_{2}$ Regularization", "year": 2017, "pdf": "https://doi.org/10.1109/TNNLS.2016.2580572"}, {"id": "fdfb82f654e03c4d884713278077cc66e42afe41", "title": "Analysis of variance of Gabor filter banks parameters for optimal face recognition", "year": "2011", "pdf": "http://doi.org/10.1016/j.patrec.2011.09.013"}, {"id": "aa07203067566f251041f73ee087aa7dfb847509", "title": "Biometric System Using Cryptography: A Survey", "year": 2015, "pdf": 
"http://pdfs.semanticscholar.org/aa07/203067566f251041f73ee087aa7dfb847509.pdf"}, {"id": "bef2854893462ae28bdb2243bba4d010d3909289", "title": "TUBITAK UZAY at TRECVID 2010: Content-Based Copy Detection and Semantic Indexing", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/bef2/854893462ae28bdb2243bba4d010d3909289.pdf"}, {"id": "e67e6c5cc693e2b3e41a98bdd70985614889330f", "title": "Demographic classification: Do gender and ethnicity affect each other?", "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6317383"}, {"id": "a9cecfbc47a39fa0158a5f6fd883e0e5ac2aa134", "title": "Optimal Subspace Analysis for Face Recognition", "year": 2005, "pdf": "https://doi.org/10.1142/S0218001405004071"}, {"id": "3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e", "title": "Local Directional Number Pattern for Face Analysis: Face and Expression Recognition", "year": 2013, "pdf": "http://www.chennaisunday.com/IEEE%202013%20Dotnet%20Basepaper/Local%20Directional%20Number%20Pattern%20for%20Face%20Analysis%20Face%20and%20Expression%20Recognition.pdf"}, {"id": "4af7a077cd5dbabac0e1f52296331bb179b14706", "title": "Face gender recognition using improved appearance-based Average Face Difference and support vector machine", "year": 2010, "pdf": null}, {"id": "786e57ed6877dc8491b1bb9253f8b82c02732977", "title": "Efficient approach to de-identifying faces in videos", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/786e/57ed6877dc8491b1bb9253f8b82c02732977.pdf"}, {"id": "640cf5969771523267d1b1d69ce4ede72fdcddea", "title": "Example-based 3D face reconstruction from uncalibrated frontal and profile images", "year": 2015, "pdf": "https://doi.org/10.1109/ICB.2015.7139051"}, {"id": "6cad808376cf5c5dc8202471c046c2cb80afdc09", "title": "A new approach for color image segmentation based on color mixture", "year": 2011, "pdf": "https://doi.org/10.1007/s00138-011-0395-z"}, {"id": "8097841cc4f3559e32c32db97624255808bacf22", "title": "Biometrie symmetry: Implications on template protection", "year": 2017, "pdf": "https://doi.org/10.23919/EUSIPCO.2017.8081346"}, {"id": "dc8b25e35a3acb812beb499844734081722319b4", "title": "The FERET Promising Research database and evaluation procedure for face - recognition algorithms", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf"}, {"id": "c593c6080c75133191a27381a58cd07c97aa935b", "title": "Gender Classification Using a Min-Max Modular Support Vector Machine with Incorporating Prior Knowledge", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/c593/c6080c75133191a27381a58cd07c97aa935b.pdf"}, {"id": "b15663eb2e0a7f92dbb5364c5a8e0742105f4ebe", "title": "Selecting highly optimal architectural feature sets with Filtered Cartesian Flattening", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/b156/63eb2e0a7f92dbb5364c5a8e0742105f4ebe.pdf"}, {"id": "babc76d0550a647dc605dca9a69bda1af8e0c872", "title": "AN EFFECTIVE COLOR FACE RECOGNITION BASED ON BEST COLOR FEATURE SELECTION ALGORITHM USING WEIGHTED FEATURES FUSION SYSTEM", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/babc/76d0550a647dc605dca9a69bda1af8e0c872.pdf"}, {"id": "d818568838433a6d6831adde49a58cef05e0c89f", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "2b2acf2de016f0fb3538ceaaf3a9ba869b466089", "title": "Finding your Lookalike: Measuring Face Similarity Rather than Face Identity", "year": "2018", "pdf": 
"https://arxiv.org/pdf/1806.05252.pdf"}, {"id": "10c7575e7db69a208bfb21e3fc0cbc3f7698e99d", "title": "New sparse representation methods; application to image compression and indexing. (Nouvelles m\u00e9thodes de repr\u00e9sentations parcimonieuses ; application \u00e0 la compression et l'indexation d'images)", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/a309/ff76c5c3cdc4b7578c9cede814688c6f4521.pdf"}, {"id": "3f768179cea3c710dac8e68c0a890c48cfdc8ed1", "title": "Face Template Protection Using Deep Convolutional Neural Network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575550"}, {"id": "61692cffff60568da43780df38876c11390ccdc8", "title": "Gabor Orientation Histogram for Face Representation and Recognition", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/6169/2cffff60568da43780df38876c11390ccdc8.pdf"}, {"id": "109f2ec25cff824458328eafe53dda07abf4da94", "title": "Face recognition using weighted distance transform", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6512145"}, {"id": "c161f2a98834db4cbb7b42d2853d0bab491b8e8b", "title": "Recognition of Face Images through the FusionApproach", "year": "2014", "pdf": null}, {"id": "7d841607ce29ff4a75734ffbf569431425d8342f", "title": "Bimodal 2D-3D face recognition using a two-stage fusion strategy", "year": 2015, "pdf": "https://doi.org/10.1109/IPTA.2015.7367146"}, {"id": "05bd3ebba35a683e5035a3920352bc7aec85bd78", "title": "License Plate Detection on Autonomous Surveillance Systems", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/05bd/3ebba35a683e5035a3920352bc7aec85bd78.pdf"}, {"id": "267a60d699bc28d891af65fd3ac11471e377cb1c", "title": "The CSU Face Identification Evaluation System User\u2019s Guide: Version 5.0", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/267a/60d699bc28d891af65fd3ac11471e377cb1c.pdf"}, {"id": "4509654fa18710870797e89c40bdebdd3b9e9950", "title": "A novel pupil localization method based on GaborEye model and radial symmetry operator", "year": 2004, "pdf": "http://vipl.ict.ac.cn/sites/default/files/papers/files/2004_ICIP_pyang_A%20Novel%20Pupil%20Localization%20Method%20Based%20on%20Gaboreye%20Model%20and%20Radial%20Symmetry%20Operator.pdf"}, {"id": "ea1f2f89511554081e8676d4181b045219da5885", "title": "Depth Estimation of Face Images Using the Nonlinear Least-Squares Model", "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2012.2204269"}, {"id": "470690bb2c165b8e2c49e25add434216b59b1987", "title": "Incremental Linear Discriminant Analysis for Face Recognition", "year": 2008, "pdf": "https://doi.org/10.1109/TSMCB.2007.908870"}, {"id": "b0b6346104cbf878a072da93d49ad6e9f65befaf", "title": "SCiFI - A System for Secure Face Identification", "year": 2010, "pdf": "http://www278.pair.com/bennyp/PAPERS/scifi.pdf"}, {"id": "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "title": "MORPH: a longitudinal image database of normal adult age-progression", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78"}, {"id": "ec89f2307e29cc4222b887eb0619e0b697cf110d", "title": "Face Recognition Using Dual-Tree<newline/> Complex Wavelet Features", "year": 2009, "pdf": "https://doi.org/10.1109/TIP.2009.2027361"}, {"id": "1888bf50fd140767352158c0ad5748b501563833", "title": "A Guided Tour of Face Processing", "year": "", "pdf": "http://pdfs.semanticscholar.org/1888/bf50fd140767352158c0ad5748b501563833.pdf"}, {"id": "5f1371c7d313c58af1fd7ac9574a2965c5abefe9", "title": "AView From the Other Side of the Mean", "year": 2009, "pdf": 
"http://pdfs.semanticscholar.org/5f13/71c7d313c58af1fd7ac9574a2965c5abefe9.pdf"}, {"id": "f834c50e249c9796eb7f03da7459b71205dc0737", "title": "Enhanced Patterns of Oriented Edge Magnitudes for Face Recognition and Image Matching", "year": 2012, "pdf": "https://doi.org/10.1109/TIP.2011.2166974"}, {"id": "d95e6185f82e3ef3880a98122522eca8c8c3f34e", "title": "A video database of moving faces and people", "year": 2005, "pdf": "http://bbs.utdallas.edu/facelab/docs/4_05_otoole-pami.pdf"}, {"id": "a8cd4bc012e08a2c6b2ca7618ec411a8d9e523e7", "title": "Mixture of experts for classification of gender, ethnic origin, and pose of human faces", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/a8cd/4bc012e08a2c6b2ca7618ec411a8d9e523e7.pdf"}, {"id": "21e666abe02b1cab090825199471db7f744aa424", "title": "A Real-time Face Recognition System Using Multiple Mean Faces and I Dual Mode Fishefwaces", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/21e6/66abe02b1cab090825199471db7f744aa424.pdf"}, {"id": "8de9380536a5f7f29cfe59578041efe7c8ea20bd", "title": "Facial image-based gender classification using Local Circular Patterns", "year": 2012, "pdf": "http://ieeexplore.ieee.org/document/6460658/"}, {"id": "209c4bae72a3810114c67e0ca435fcd1646986f8", "title": "Body gait tracking with a Fisher Discriminance on feature analyse", "year": 2012, "pdf": null}, {"id": "86450dfae8b38a051c21d6f49073671c3a7b5253", "title": "CloudID: Trustworthy cloud-based and cross-enterprise biometric identification", "year": 2015, "pdf": "https://doi.org/10.1016/j.eswa.2015.06.025"}, {"id": "7200b479b791f336552ab421871c217e73bf314e", "title": "Effects of the menstrual cycle on looking preferences for faces in female rhesus monkeys", "year": 2006, "pdf": null}, {"id": "2a2d75e5900faa7e6af8ae050e12926e0f8a9c16", "title": "Face Recognition Method Based on Improved LDA", "year": 2017, "pdf": null}, {"id": "f3a094d213b373d0aa20e157fdedcd349c1d1777", "title": "Single-sample-per-person-based face recognition using fast Discriminative Multi-manifold Analysis", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7041709"}, {"id": "027d856064df21912b7bc5018b6392e776f8bf09", "title": "Face Authentication using Euclidean Distance Model with PSO Algorithm", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/af4e/cf51bc5b172243b74a5ceb7b8bfe2757f24b.pdf"}, {"id": "5ab4be9e574d7ce9d32c74e9dd25914d3720edb6", "title": "Experiments with Multi-view Multi-instance Learning for Supervised Image Classification", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/5ab4/be9e574d7ce9d32c74e9dd25914d3720edb6.pdf"}, {"id": "52aaffdc7f8d96f2c8cc8935d8cf5803f193941d", "title": "Ensemble of texture descriptors for face recognition obtained by varying feature transforms and preprocessing approaches", "year": 2017, "pdf": "https://doi.org/10.1016/j.asoc.2017.07.057"}, {"id": "d45f5f160ceff67d316b311778d5c4ea9a9279ac", "title": "Dimensionality reduction in subspace face recognition", "year": 2007, "pdf": null}, {"id": "b824cdb86ed0b5c43d46a9811c170a661b4646e9", "title": "A test sample oriented two-phase discriminative dictionary learning algorithm for face recognition", "year": "2016", "pdf": "http://doi.org/10.3233/IDA-150296"}, {"id": "783ea3478d187846a770eec2326bf7c0080a3005", "title": "Large-scale adaptive semi-supervised learning via unified inductive and transductive model", "year": 2014, "pdf": "http://doi.acm.org/10.1145/2623330.2623731"}, {"id": "ef9dd272eae0f66c3d62785a6b29c2cc24e6dc19", "title": "Face image illumination 
quality assessment for surveillance video using KPLSR", "year": 2016, "pdf": null}, {"id": "a8613d1526c816dc5675806bae6a16dd864c3237", "title": "Incremental learning of bidirectional principal components for face recognition", "year": 2010, "pdf": "https://doi.org/10.1016/j.patcog.2009.05.020"}, {"id": "de2aaabc3fcc21042a64dc266f560dad91028b79", "title": "Energy-Aware Real-Time Face Recognition System on Mobile CPU-GPU Platform", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/de2a/aabc3fcc21042a64dc266f560dad91028b79.pdf"}, {"id": "905e3fe7a0da114d6516dc4cfac1c1c2a55213de", "title": "Two-dimensional complete neighborhood preserving embedding", "year": 2013, "pdf": "https://doi.org/10.1007/s00521-013-1365-3"}, {"id": "618d3ad69c677016547098e01b9c6e94c260de1d", "title": "What are customers looking at?", "year": 2007, "pdf": "http://cvlab.cse.msu.edu/pdfs/liu_krahnstoever_yu_tu_avss_2007.pdf"}, {"id": "7ed88b4444de20cda76cd73f7374ccb76e753002", "title": "Feature selection method with common vector and discriminative common vector approaches", "year": 2011, "pdf": null}, {"id": "ad5965e00d9511528c91adea0b356ad1e7081f0e", "title": "A weighted probabilistic approach to face recognition from multiple images and video sequences", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/ad59/65e00d9511528c91adea0b356ad1e7081f0e.pdf"}, {"id": "b8a53daa97fb917a89c351c47f0b197573e20023", "title": "Recognizing Faces---An Approach Based on Gabor Wavelets", "year": "2005", "pdf": "https://pdfs.semanticscholar.org/3b7b/426f179ca3df6dc73c80850155fe9107b03c.pdf"}, {"id": "0f1ecc5d8834df4390755cb97c239a9f4248b30f", "title": "Individual Stable Space: An Approach to Face Recognition Under Uncontrolled Conditions", "year": 2008, "pdf": "http://www.researchgate.net/profile/Kate_Smith-Miles/publication/3304170_Individual_Stable_Space_An_Approach_to_Face_Recognition_Under_Uncontrolled_Conditions/links/00b4952d453ff51c77000000.pdf"}, {"id": "10acb3445cbd8224f1ab6616f00b212586bbb967", "title": "Face super-resolution using a hybrid model", "year": 2008, "pdf": null}, {"id": "2dc3795a292ceb1c3070d1ab1e0df641f596b022", "title": "An enhanced Local Ternary Patterns method for face recognition", "year": 2014, "pdf": null}, {"id": "72367499712525a9f82d0a1d844e2a76d749304d", "title": "Multi - Layer Biometric System for the Port of Los Angeles Final Report", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/7236/7499712525a9f82d0a1d844e2a76d749304d.pdf"}, {"id": "dfa76da5eb2729059ef303cbc25e01ca30383615", "title": "Genetic & Evolutionary Type II feature extraction for periocular-based biometric recognition", "year": 2010, "pdf": null}, {"id": "93dd71a7261220374b924137b87da59bd1c3b805", "title": "Effective Feature Extraction in High-Dimensional Space", "year": 2008, "pdf": "https://doi.org/10.1109/TSMCB.2008.927276"}, {"id": "ae5743712462f99bc3618eaed9814ef305048ff4", "title": "Face Recognition Method Based on the Adaptive Fuzzy Weighted Sub-Pattern SVD", "year": 2017, "pdf": null}, {"id": "9b2a90eb75a7dfe5436fe085d3ec094e041b440c", "title": "Locality-Preserved Maximum Information Projection", "year": 2008, "pdf": "https://doi.org/10.1109/TNN.2007.910733"}, {"id": "dd1a85e4c2ab2ed241af0e8d6e588498597abc8d", "title": "Low-resolution face recognition: a review", "year": 2013, "pdf": "https://doi.org/10.1007/s00371-013-0861-x"}, {"id": "e835a8cf0c7046b409b16b6720b27769ad83269d", "title": "Intraclass Retrieval of Nonrigid 3D Objects: Application to Face Recognition", "year": "2007", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4042698"}, {"id": "2d640ca0ca8a38d1fd69370ab2f69bd10b6fe1ed", "title": "Face Recognition Based on Image Enhancement and Gabor Features", "year": 2006, "pdf": null}, {"id": "db9bdf10452e99a6dcbc5c01e8c934f85d61b101", "title": "A Gabor Pseudo Fisherface Based Face Recognition Algorithm for LSI Implementation", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.236"}, {"id": "6017b19c4c0e6c0e5b32e54efda6eff78b69d1dd", "title": "An Efficient 3D Geometrical Consistency Criterion for Detection of a Set of Facial Feature Points", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/6017/b19c4c0e6c0e5b32e54efda6eff78b69d1dd.pdf"}, {"id": "3d6229044f6605604818f39f08c5270a5a132a03", "title": "Projective Nonnegative Matrix Factorization based on \u03b1-Divergence", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/3d62/29044f6605604818f39f08c5270a5a132a03.pdf"}, {"id": "91303efc57f2241b6aaee7ae86c4be60a7bad7f8", "title": "Embedded door access control systems based on face recognition: A survey", "year": 2013, "pdf": "https://doi.org/10.1109/ICSPCS.2013.6723968"}, {"id": "b3fa62a7028578be8d1f8eb0877c762a4d6639c1", "title": "Efficient Face Recognition System Using Random Forests", "year": "", "pdf": "http://pdfs.semanticscholar.org/b3fa/62a7028578be8d1f8eb0877c762a4d6639c1.pdf"}, {"id": "bab9bf053c609d7e9417ff1e285b992798bb01f5", "title": "Collaborative representation analysis methods for feature extraction", "year": "2016", "pdf": "http://doi.org/10.1007/s00521-016-2299-3"}, {"id": "c129ec30dbc02c30779ca5812d084758aa69c7e5", "title": "Two-Dimensional Inverse FDA for Face Recognition", "year": 2008, "pdf": null}, {"id": "58970f1f51432a094faaeb3f4f70aa1778d61a42", "title": "Face Alignment Via Component-Based Discriminative Search", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/5897/0f1f51432a094faaeb3f4f70aa1778d61a42.pdf"}, {"id": "c3a53b308c7a75c66759cbfdf52359d9be4f552b", "title": "On Detecting Partially Occluded Faces with Pose Variations", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISPAN-FCST-ISCC.2017.16"}, {"id": "534551fe2c4ce76c9a9a752364cd9c3af0dc9093", "title": "Gait Dynamics for Recognition and Classification", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/5345/51fe2c4ce76c9a9a752364cd9c3af0dc9093.pdf"}, {"id": "4d5c54c4a0f8c7758f31560f2b2d610db0a1306b", "title": "Separable linear discriminant analysis", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/4d5c/54c4a0f8c7758f31560f2b2d610db0a1306b.pdf"}, {"id": "aa766115e8bd91a05cb8fdded5ae036c9b03c7d3", "title": "Using colour local binary pattern features for face recognition", "year": 2010, "pdf": "http://www.researchgate.net/profile/Konstantinos_Plataniotis/publication/221118426_Using_colour_local_binary_pattern_features_for_face_recognition/links/00b7d514320a901302000000.pdf"}, {"id": "9327886ca53bc1e08df935d77ed331cbc085de01", "title": "Robust Approaches for Multi-Label Face Classification", "year": 2016, "pdf": "https://doi.org/10.1109/DICTA.2016.7797076"}, {"id": "a7a8b35aef658490920d62623870606c46c95c15", "title": "Formal Implementation of a Performance Evaluation Model for the Face Recognition System", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/a7a8/b35aef658490920d62623870606c46c95c15.pdf"}, {"id": "2163c401f6345350e38b2ce6f39a42871fb22c84", "title": "On the effectiveness of soft biometrics for increasing face verification rates", "year": "2015", "pdf": 
"http://doi.org/10.1016/j.cviu.2015.03.003"}, {"id": "05c9a18cc71a818196ca53fda88cbacf434d3ee9", "title": "Comparing two video-based techniques for driver fatigue detection: classification versus optical flow approach", "year": 2011, "pdf": "https://doi.org/10.1007/s00138-011-0321-4"}, {"id": "9cb916aa3672a8071d2d77931ed221f4f98138f2", "title": "Composition-Aided Face Photo-Sketch Synthesis", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.00899.pdf"}, {"id": "9df3c81ce84b027d9cda37c754250d31a5561005", "title": "Semantic Scene Modeling and Retrieval", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/9df3/c81ce84b027d9cda37c754250d31a5561005.pdf"}, {"id": "d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea", "title": "Face Recognition with Patterns of Oriented Edge Magnitudes", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/d03e/4e938bcbc25aa0feb83d8a0830f9cd3eb3ea.pdf"}, {"id": "7ab7befcd319d55d26c1e4b7b9560da5763906f3", "title": "Facial Trait Code", "year": 2013, "pdf": "http://www.researchgate.net/profile/Lee_Ping-Han/publication/236160185_Facial_Trait_Code/links/0c96051e26825bd65a000000.pdf"}, {"id": "faa917f464183173d22867da6cc213af905dd1d0", "title": "Face Recognition Using Total Margin-Based Adaptive Fuzzy Support Vector Machines", "year": 2007, "pdf": "https://doi.org/10.1109/TNN.2006.883013"}, {"id": "ac5ee48955ce97433cadef011bedd8e65a574255", "title": "Image classification using kernel collaborative representation with regularized least square", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ac5e/e48955ce97433cadef011bedd8e65a574255.pdf"}, {"id": "fbfbeff230b312f093227dba93ccee63a16ec88d", "title": "Coupled Learning for Facial Deblur", "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2015.2509418"}, {"id": "433d2d5528d1401a402f2c1db40b933c494f11ba", "title": "Face Recognition Based on Discriminant Evaluation in the Whole Space", "year": 2007, "pdf": "https://doi.org/10.1109/ICASSP.2007.366218"}, {"id": "b15bd6376978321e37f339f0d1e7d5124559d389", "title": "Face Recognition Based on Projected Color Space With Lighting Compensation", "year": 2011, "pdf": "https://doi.org/10.1109/LSP.2011.2163798"}, {"id": "093f8e0feb5635eac86ce58080ca80ab5fc88e17", "title": "Dirichlet process mixture models on symmetric positive definite matrices for appearance clustering in video surveillance applications", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995723"}, {"id": "1a2431e3b35a4a4794dc38ef16e9eec2996114a1", "title": "Automated Face Recognition: Challenges and Solutions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1a24/31e3b35a4a4794dc38ef16e9eec2996114a1.pdf"}, {"id": "6766142f415561701250bbaa691fcd8e4173f99d", "title": "Biometric Evaluation on the Cloud: A Case Study with HumanID Gait Challenge", "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6424951"}, {"id": "d00fc71e4ed39ec5954ce9da4faa9c4eddb71929", "title": "Face Alignment with Unified Subspace Optimization of Active Statistical Models", "year": "2006", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1612999"}, {"id": "443ddd7e7e367328b175c4e79701a080948fc9ee", "title": "Face Recognition Using Classification-Based Linear Projections", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/443d/dd7e7e367328b175c4e79701a080948fc9ee.pdf"}, {"id": "84b451a5749837d28af5e96fd3349d852fefe4f2", "title": "A Robust Face Recognition Approach through Different Local Features", "year": "2017", "pdf": 
"https://pdfs.semanticscholar.org/84b4/51a5749837d28af5e96fd3349d852fefe4f2.pdf"}, {"id": "c3d0443e83145d204c5bc7e9f0312003f17ddd71", "title": "A general framework for the evaluation of symbol recognition methods", "year": "2006", "pdf": "http://doi.org/10.1007/s10032-006-0033-x"}, {"id": "5d3a45e25d0e3221bb581edf74bb920aaaa11b02", "title": "Non-negative Structured Pyramidal Neural Network for Pattern Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489216"}, {"id": "c777d4cb33219274183fbc212ad5faf25638c8ad", "title": "Mongoloid and Non-Mongoloid Race Classification from Face Image Using Local Binary Pattern Feature Extractions", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8528783"}, {"id": "38d8cf143e64e013b6b9eb3ea73957c3b2ffbee9", "title": "A Novel Local Pattern Descriptor\u2014Local Vector Pattern in High-Order Derivative Space for Face Recognition", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7025047"}, {"id": "3035bcbad93767570d444c136f4036f357648d60", "title": "Feature Extraction for Incomplete Data Via Low-Rank Tensor Decomposition With Feature Regularization.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3035/bcbad93767570d444c136f4036f357648d60.pdf"}, {"id": "8258ba9179f5c22f9f544de0e4717b37adbaade9", "title": "Face Recognition Using Multiscale and Spatially Enhanced Weber Law Descriptor", "year": 2012, "pdf": "https://doi.org/10.1109/SITIS.2012.24"}, {"id": "28525f9bb0797fdf7c0a8a9d1666015c60610077", "title": "Development of Mobile Face Verification Based on Locally Normalized Gabor Wavelets", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2852/5f9bb0797fdf7c0a8a9d1666015c60610077.pdf"}, {"id": "782ddac9ea1b378bc72adf7c31f67f3f7c6f8d25", "title": "Face image super-resolution via weighted patches regression", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900242"}, {"id": "33b4dc370731535e39df8ce22fb43dd80d811dc4", "title": "Face identification in near IR-videos using smart eye tracking profiles", "year": "2015", "pdf": null}, {"id": "06b72db17f9af33996f97f45eca1473658973286", "title": "A novel approach for multi-pose face detection by using skin color and FloatBoost", "year": 2010, "pdf": null}, {"id": "d15cd6285500a3c6b1fdc607720577fd4040baba", "title": "Kernel-based Regularized Neighbourhood Preserving Embedding in face recognition", "year": 2012, "pdf": null}, {"id": "0a3fa8e6f158e7faec024d83964751a5d59fe836", "title": "ICCV - 99 Cover Sheet", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/0a3f/a8e6f158e7faec024d83964751a5d59fe836.pdf"}, {"id": "52036ffa9407fae480482615dd76b00d0c50f2fb", "title": "Analysis of Subspace-based Face Recognition Techniques under Changes in Imaging Factors", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ITNG.2007.37"}, {"id": "58914e00b4927387d07bfe1efaed7cc7e51f1d77", "title": "Principal Component Net Analysis for Face Recognition", "year": 2006, "pdf": "https://doi.org/10.1007/11925231_70"}, {"id": "283371bb21aaddf92ea975869a69b3b1aad8a4db", "title": "Recognizing faces using Adaptively Weighted Sub-Gabor Array from a single sample image per enrolled subject", "year": 2010, "pdf": "https://doi.org/10.1016/j.imavis.2009.06.013"}, {"id": "748835f09adf69955de260fe2497906908a7609c", "title": "A survey of face recognition algorithms and testing results", "year": 1997, "pdf": null}, {"id": "77cb6ea4feff6f44e9977cc7572185d24e48ce40", "title": "On the 
Complementarity of Face Parts for Gender Recognition", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/77cb/6ea4feff6f44e9977cc7572185d24e48ce40.pdf"}, {"id": "040bd69b800a8a169da02463e78504027a94cbe2", "title": "Automatic frontal view face image synthesis", "year": 2010, "pdf": "https://doi.org/10.1109/ICIP.2010.5653125"}, {"id": "a1f4cd1e435f093db6bc88e7a815c17a6b9d24b5", "title": "Face Recognition Using Local Gabor Phase Characteristics", "year": 2010, "pdf": null}, {"id": "6f0001fd57244bdbca5a1cad8143aff2387d4454", "title": "Multiple-rank supervised canonical correlation analysis for feature extraction, fusion and recognition", "year": 2017, "pdf": "https://doi.org/10.1016/j.eswa.2017.05.017"}, {"id": "189ae7cba20024fac64a21d90e539f2c2a1765ad", "title": "Statistical transformations of frontal models for non-frontal face verification", "year": 2004, "pdf": "http://users.rsise.anu.edu.au/~conrad/cs/papers/cached/sanderson_icip_2004.pdf"}, {"id": "4e7ef87467306d34eaaf2a38d0f10583b7f64754", "title": "Enhancing incremental learning/recognition via efficient neighborhood estimation", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5582922"}, {"id": "46216757ae27573fcea9fadc858bb65e48d8f85c", "title": "Robust Eye Detection from Facial Image based on Multi-cue Facial Information", "year": 2007, "pdf": null}, {"id": "91541fdafab6bc46955c084a8164bc5fc42568e7", "title": "Discrminative Geometry Preserving Projections", "year": 2009, "pdf": "https://doi.org/10.1109/ICIP.2009.5414091"}, {"id": "b7d540cd0de72e984cdec44afa4a4d039cfd5eea", "title": "Object Tracking Benchmark", "year": 2015, "pdf": "http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf"}, {"id": "11916461bb0a150ef31d4a4249160dce451d77db", "title": "Robust face recognition via discriminative and common hybrid dictionary learning", "year": "2017", "pdf": "http://doi.org/10.1007/s10489-017-0956-6"}, {"id": "e57acaedd5a76e338bae790d8464aadada4d4902", "title": "Subspaces versus Submanifolds: a Comparative Study in Small Sample Size Problem", "year": 2009, "pdf": "https://doi.org/10.1142/S0218001409007168"}, {"id": "ee46e391288dd3bc3e71cb47715a83dacb9d2907", "title": "Face expression recognition using integrated approach of Local Directional Number and Local Tetra Pattern", "year": 2015, "pdf": null}, {"id": "5322f56b8768f8053e8b0c4944d81377d1d60411", "title": "FACE ALIGNMENT USING BOOSTED APPEARANCE MODEL (Discriminative Appearance Model)", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/5322/f56b8768f8053e8b0c4944d81377d1d60411.pdf"}, {"id": "12d8ee0a95daf4927b5f5fb0d0b6bf73850bdeea", "title": "Collaborative Face Recognition for Improved Face Annotation in Personal Photo Collections Shared on Online Social Networks", "year": 2011, "pdf": "http://koasas.kaist.ac.kr/bitstream/10203/22860/1/64.pdf"}, {"id": "8b5e32389af26e86e0b00f21d48ef938c3b6c699", "title": "Robotic Arm-Based Face Recognition Software Test Automation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409387"}, {"id": "01574e0d3be1b866471b83e39f7404c80d608e63", "title": "Application of Blind Deblurring Algorithm for Face Biometric", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/586d/4e31b2b2be4b43e7f3d53530c0a35a3aa93b.pdf"}, {"id": "d8cf6b19c75489dd7a5f4107e2e3ec494274da41", "title": "Partial Relevance in Interactive Facial Image Retrieval", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/d8cf/6b19c75489dd7a5f4107e2e3ec494274da41.pdf"}, {"id": 
"dbbba520c98308df34b69ba74fa033292662f9f1", "title": "A convolutional neural network based on TensorFlow for face recognition", "year": 2017, "pdf": null}, {"id": "c5697c28272dc7e81c451a63495f229c740d597b", "title": "Intraclass Retrieval of Nonrigid 3D Objects: Application to Face Recognition", "year": 2007, "pdf": "http://www.researchgate.net/profile/Ioannis_Kakadiaris/publication/6630515_Intraclass_retrieval_of_nonrigid_3D_objects_application_to_face_recognition/links/02e7e51757d77c1bbd000000.pdf"}, {"id": "e98dad1ab4eee90dd05b6db36498f021ee21b29b", "title": "Parallelization between face localization and person identification", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301528"}, {"id": "c2c3dd9683f522240c0cec76f924c824a751f4d3", "title": "Face recognition using Zernike and complex Zernike moment features", "year": 2011, "pdf": null}, {"id": "78b329bb205af90e6eac5fc428ce68f73b691d59", "title": "Log-Linear Elliptic Transform for Frontal-Face Parameter Estimation", "year": 2007, "pdf": "https://doi.org/10.1109/ICSMC.2007.4414192"}, {"id": "c2e3764eeb933910b9be449b42eee1aed3970345", "title": "Maximum Confidence Hidden Markov Modeling", "year": 2006, "pdf": "https://www.researchgate.net/profile/Jen-Tzung_Chien/publication/224642145_Maximum_Confidence_Hidden_Markov_Modeling/links/0fcfd5122945a0aea7000000.pdf"}, {"id": "9c265d5c870b46bef8526ec7eaf0f9022a9736e7", "title": "Bayesian methods for face recognition from video", "year": 2002, "pdf": "http://degas.umiacs.umd.edu/Hid/kevin_icassp02.pdf"}, {"id": "972ef9ddd9059079bdec17abc8b33039ed25c99c", "title": "A Novel on understanding How IRIS Recognition works", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/972e/f9ddd9059079bdec17abc8b33039ed25c99c.pdf"}, {"id": "853bd61bc48a431b9b1c7cab10c603830c488e39", "title": "Learning Face Representation from Scratch", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf"}, {"id": "86f72a4391c3b90a03f8fbbd28d4846947f028d6", "title": "Principal Manifolds and Probabilistic Subspaces for Visual Recognition", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/a8ca/ea2199061186c793e473860835bd9b818157.pdf"}, {"id": "91d6d25512e3297830b7d8b330f311656db09831", "title": "An experimental comparison of gender classification methods", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/91d6/d25512e3297830b7d8b330f311656db09831.pdf"}, {"id": "00e266ce507bd7bc248190e135824619decd5ac9", "title": "Mixed group ranks: preference and confidence in classifier combination", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2004.48"}, {"id": "dec56c82568b1ae7ff532017d842f2ed00eba244", "title": "Is ICA significantly better than PCA for face recognition?", "year": 2005, "pdf": "http://repository.lib.polyu.edu.hk/jspui/bitstream/10397/197/1/Conf_V1_05.pdf"}, {"id": "6890f210cdc574caa89fa94933ec942360aa1e0c", "title": "How effective are landmarks and their geometry for face recognition?", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/6890/f210cdc574caa89fa94933ec942360aa1e0c.pdf"}, {"id": "b7eead8586ffe069edd190956bd338d82c69f880", "title": "A Video Database for Facial Behavior Understanding", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/b7ee/ad8586ffe069edd190956bd338d82c69f880.pdf"}, {"id": "00f5c0ad23a9d7c312a316d2a8c6539aadfc95aa", "title": "Simultaneous learning of a discriminative projection and prototypes for Nearest-Neighbor classification", "year": 2008, "pdf": 
"http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/250.pdf"}, {"id": "56c448434df9f01de306815122f6517a33235e22", "title": "Label propagation through sparse neighborhood and its applications", "year": 2012, "pdf": "https://doi.org/10.1016/j.neucom.2012.03.017"}, {"id": "078937f636ab702137052276c62afb75ad1033ea", "title": "Sequential Row\u2013Column 2DPCA for face recognition", "year": 2011, "pdf": "http://www.researchgate.net/profile/Karl_Ricanek/publication/251349523_Sequential_RowColumn_2DPCA_for_face_recognition/links/0c960526fc70b525e0000000.pdf"}, {"id": "653942873bc7ea6f1056739dc5015ec3631d9bbe", "title": "Face Detection Techniques- A Review", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6539/42873bc7ea6f1056739dc5015ec3631d9bbe.pdf"}, {"id": "48be9300acdc484100436f32bd409a89a7dc1ef7", "title": "Chapter 4 FACE RECOGNITION AND ITS APPLICATIONS", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/48be/9300acdc484100436f32bd409a89a7dc1ef7.pdf"}, {"id": "eeaca5178795fcbce0e0042549b6bf585033122f", "title": "Flexible X-Y patches for face recognition", "year": 2008, "pdf": "https://doi.org/10.1109/ICASSP.2008.4518059"}, {"id": "a72e5d33361ed5331a4c8cea497df964f9ae6ba7", "title": "Face recognition accuracy of Gabor phase representations at different scales and orientations", "year": 2011, "pdf": "https://doi.org/10.1109/ICMLC.2011.6017000"}, {"id": "4c2fe5997a5570720bac2ab5ce187f1bd2aed1e7", "title": "A comparison of two computer-based face identification systems with human perceptions of faces", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/5d2b/689f5b1e6f798b9b265f4486ef6d43079961.pdf"}, {"id": "25bd32b6332093ebc938b55b7096094f3fd22cdc", "title": "Effects of illumination changes on the performance of Geometrix FaceVision/spl reg/ 3D FRS", "year": 2004, "pdf": null}, {"id": "06da0e4ae21835f0d33cfbf66c8b73b58625c57b", "title": "Facial Keypoints Detection", "year": "2017", "pdf": "https://arxiv.org/pdf/1710.05279.pdf"}, {"id": "1d0cbbe466647286bd73d41032a418b0e2265e7c", "title": "Fusion of face and gait for human recognition", "year": "2008", "pdf": "https://pdfs.semanticscholar.org/1d0c/bbe466647286bd73d41032a418b0e2265e7c.pdf"}, {"id": "3199a528e5e68f48827544abee7fd6b47678dba5", "title": "Human Face Recognition Using Weighted Vote of Gabor Magnitude Filters", "year": "", "pdf": "http://pdfs.semanticscholar.org/3199/a528e5e68f48827544abee7fd6b47678dba5.pdf"}, {"id": "b5476afccf97fc498f51170e65ac9cd9665fd2ce", "title": "Wide Range Face Pose Estimation by Modelling the 3D Arrangement of Robustly Detectable Sub-parts", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/b547/6afccf97fc498f51170e65ac9cd9665fd2ce.pdf"}, {"id": "5a1bf442073b95a3392e673dd14bf73fc1e99f6b", "title": "Internal Report 96{08 Face Recognition by Elastic Bunch Graph Matching Face Recognition by Elastic Bunch Graph Matching", "year": "1997", "pdf": "https://pdfs.semanticscholar.org/5a1b/f442073b95a3392e673dd14bf73fc1e99f6b.pdf"}, {"id": "0f04a95ec885cf98e7cee43eacff13de0c888d3b", "title": "The FERET September 1996 Database and Evaluation Procedure", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f04/a95ec885cf98e7cee43eacff13de0c888d3b.pdf"}, {"id": "3bbdfa097a4c39012cb322b23051e360c2f7f023", "title": "Learning Race from Face: A Survey", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2321570"}, {"id": "2598c02e537b02ce181eea1aa49a698080a391a8", "title": "Improving Recognition Performance for Duplicate Facial Images", "year": 2007, "pdf": 
"http://pdfs.semanticscholar.org/2598/c02e537b02ce181eea1aa49a698080a391a8.pdf"}, {"id": "0bc7b12b4eb5890e5e9fb45db2c7a801980441cb", "title": "A New Color Image Database for Benchmarking of Automatic Face Detection and Human Skin Segmentation Techniques", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/0bc7/b12b4eb5890e5e9fb45db2c7a801980441cb.pdf"}, {"id": "0735e0b0266d94b670fa6e1b974d3676ef4e3e24", "title": "Face Recognition by Elastic Bunch Graph Matching", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/37ef/a1e61c4a52b191dca26e59e1ce686a89541c.pdf"}, {"id": "905339e771e69cd412f3379c4da02b78ec2a950f", "title": "Learning a Synchronous MAP for Improved Face Recognition", "year": 2004, "pdf": "https://doi.org/10.1109/CVPR.2004.380"}, {"id": "216081f7e3ac058b2bad7609676193f50da24db9", "title": "Misleading first impressions: different for different facial images of the same person.", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2160/81f7e3ac058b2bad7609676193f50da24db9.pdf"}, {"id": "1746fec6b033f5fc7aa0f8c99e1f238ac3710ee9", "title": "An Extensive Survey on Feature Extraction Techniques for Facial Image Processing", "year": 2014, "pdf": null}, {"id": "8ab2bc0f298cf595d50064a5bce57065d5b69c59", "title": "Development of Multimedia Application for Smartphones", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/da44/8a1d037f9dcd3e41a752d60df7d28df3c74b.pdf"}, {"id": "d68f24e2c8e753d4d1e62f2231f6f33370de24de", "title": "National Biometric Test Center Collected Works", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/d68f/24e2c8e753d4d1e62f2231f6f33370de24de.pdf"}, {"id": "fc9e60f370252bc9a6120a6b2c39703ac1fee810", "title": "Critical Points to Determine Persistence Homology", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06148.pdf"}, {"id": "e04a5a6860b80e3a7fc293d495f3b9a822c3f98d", "title": "Exploration of the Influence of Smiling on Initial Reactions Across Levels of Facial Attractiveness", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e04a/5a6860b80e3a7fc293d495f3b9a822c3f98d.pdf"}, {"id": "ce20f81374e2058b01910a4d028b79c07ce7e994", "title": "Discriminating Characteristics of Gabor Phase-Face and Improved Methods for Face Recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ce20/f81374e2058b01910a4d028b79c07ce7e994.pdf"}, {"id": "3bc514bfcaa3808618ff6b4b52e4001e7a24b82b", "title": "3D Probabilistic Feature Point Model for Object Detection and Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383284"}, {"id": "2406f413907b7ef31c086755edbb22a7b7b10f5e", "title": "Evaluation of a facial recognition algorithm across three illumination conditions", "year": "2004", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1346921"}, {"id": "19d27714c820ca7cebf8c6d5a76053b23bcfdf11", "title": "Repeated Measures GLMM Estimation of Subject-Related and False Positive Threshold Effects on Human Face Verification Performance", "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2005.520"}, {"id": "6d28ba098f800caaa46d9273631135bd0318303a", "title": "A hierarchical Bayesian model for pattern recognition", "year": 2012, "pdf": "https://doi.org/10.1109/IJCNN.2012.6252839"}, {"id": "9d357bbf014289fb5f64183c32aa64dc0bd9f454", "title": "Face Identification by Fitting a 3D Morphable Model Using Linear Shape and Texture Error Functions", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/9d35/7bbf014289fb5f64183c32aa64dc0bd9f454.pdf"}, {"id": "d849905418ab2bbb159fbc45be752c50d7852e9e", 
"title": "Hyperplane arrangements for the fast matching and classification of visual landmarks", "year": 2014, "pdf": "https://doi.org/10.1007/s10044-014-0417-3"}, {"id": "3d42e17266475e5d34a32103d879b13de2366561", "title": "The Global Dimensionality of Face Space", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/7450/7306832bd71884365ed81e1cc7866e47c399.pdf"}, {"id": "9ed76667357681d4c3fb2ec7bcedf566d634d50a", "title": "Facial feature selection for gender recognition based on random decision forests", "year": 2013, "pdf": "https://doi.org/10.1109/SIU.2013.6531267"}, {"id": "869abfc258f5512fd95da179f7d92b624900eadd", "title": "Autonomous face recognition", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/869a/bfc258f5512fd95da179f7d92b624900eadd.pdf"}, {"id": "80fdf9757c0e4b62dcfff03941f1951304ba002c", "title": "Geometry of face space", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/80fd/f9757c0e4b62dcfff03941f1951304ba002c.pdf"}, {"id": "64b78b6f13c321da77e3770a748772fa837aa8c8", "title": "Parallel Architecture for Face Recognition using MPI", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/64b7/8b6f13c321da77e3770a748772fa837aa8c8.pdf"}, {"id": "66e21b99ae3aeae79589d260e3daca3b04981a9c", "title": "Multi-agent Approach towards Face Recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/66e2/1b99ae3aeae79589d260e3daca3b04981a9c.pdf"}, {"id": "4d50de8ec7335ab320f9743d199336699307b523", "title": "The Bochum / USC Face Recognition", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/4d50/de8ec7335ab320f9743d199336699307b523.pdf"}, {"id": "a6a7fa90d44a1afc4217e627cde83704668ec53f", "title": "Automatic Face Recognition using Radial Basis Function Networks", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/a6a7/fa90d44a1afc4217e627cde83704668ec53f.pdf"}, {"id": "b104061d4a39c97fdd11046e93c7512403e9302b", "title": "Measuring External Face Appearance for Face Classification", "year": "2007", "pdf": "http://doi.org/10.5772/4842"}, {"id": "0e7862580028aa80c409b52ef1fa683444afcbcd", "title": "Reducing the Dimensionality of Face Space in a Sparse Distributed Local-Features Representation", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/264a/939a7f33bf14d30f7ce481e0bcad484d9725.pdf"}, {"id": "7db00be42ded44f87f23661c49913f9d64107983", "title": "2d Face Recognition: an Experimental and Reproducible Research Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7db0/0be42ded44f87f23661c49913f9d64107983.pdf"}, {"id": "8f9f599c05a844206b1bd4947d0524234940803d", "title": "Efficient 3D reconstruction for face recognition", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/8f9f/599c05a844206b1bd4947d0524234940803d.pdf"}, {"id": "467144dfb6c2f52d63f7a012b4680165361e96e7", "title": "Multimodal biometrics: issues in design and testing", "year": 2003, "pdf": "http://w3.antd.nist.gov/pubs/ICMI_submit_4_23_03.pdf"}, {"id": "87842004ea0933f5ba113cc9d78de6b33d444f59", "title": "An innovative face image enhancement based on principle component analysis", "year": "2012", "pdf": "http://doi.org/10.1007/s13042-011-0060-x"}, {"id": "620c3f6605e528503a99a8832200d5afdf156c20", "title": "F2ID: a personal identification system using faces and fingerprints", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/620c/3f6605e528503a99a8832200d5afdf156c20.pdf"}, {"id": "eeae5d972fd093786acebf884a915924f9d66d0d", "title": "Psychology of Religion and Spirituality Christian Religious Badges Instill Trust in Christian and Non-Christian Perceivers", "year": 2015, "pdf": 
"http://pdfs.semanticscholar.org/eeae/5d972fd093786acebf884a915924f9d66d0d.pdf"}, {"id": "fc7709f209dd66a72050c9c5b105164c228ae438", "title": "Face recognition using the principal components of the scatter matrix in the frequency domain", "year": 2016, "pdf": null}, {"id": "0ecb58abfe0462fecd27fd5eb0605d736e75acf6", "title": "On Classifying Facial Races with Partial Occlusions and Pose Variations", "year": 2017, "pdf": "https://doi.org/10.1109/ICMLA.2017.00-82"}, {"id": "192e439b19824d06bb21ad6bd63cc7a55772549f", "title": "Face recognition using SURF features", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/192e/439b19824d06bb21ad6bd63cc7a55772549f.pdf"}, {"id": "76951c50e5c33132a9de0f8911b77d99d873cd0f", "title": "Face recognition by humans performed on basis of linguistic descriptors and neural networks", "year": 2016, "pdf": "https://doi.org/10.1109/IJCNN.2016.7727877"}, {"id": "e66c1950de149e0ccf90d3796dacce8b4886544d", "title": "Th\u00e8se A contribution to mouth structure segmentation in images aimed towards automatic mouth gesture recognition", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e66c/1950de149e0ccf90d3796dacce8b4886544d.pdf"}, {"id": "06bcd68dcbd8f7dadc8310fa47d44bce982f4559", "title": "Multi-linear neighborhood preserving projection for face recognition", "year": 2014, "pdf": "https://doi.org/10.1016/j.patcog.2013.08.005"}, {"id": "e000dd1aec1c7b1e9e781ec7ea66f2bde72faa5e", "title": "Ear Recognition: A Complete System", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e000/dd1aec1c7b1e9e781ec7ea66f2bde72faa5e.pdf"}, {"id": "fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6", "title": "Draft: Evaluation Guidelines for Gender Classification and Age Estimation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf"}, {"id": "2f348a2ad3ba390ee178d400be0f09a0479ae17b", "title": "Gabor-based kernel PCA with fractional power polynomial models for face recognition", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2004.1273927"}, {"id": "ae6d2fd7fc75b2eeee47e44a162f661a61b59a34", "title": "A facial recognition method based on DMW transformed partitioned images", "year": 2017, "pdf": null}, {"id": "7a7daa220b80e6f447183ab1e30531bb9eec0e0c", "title": "Subspace methods for face recognition", "year": 2010, "pdf": "https://doi.org/10.1016/j.cosrev.2009.11.003"}, {"id": "1c26e415c7eae2f3b0f49e0519f0d985ec661c63", "title": "Intersection of Longest Paths in Graph Theory and Predicting Performance in Facial Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/1c26/e415c7eae2f3b0f49e0519f0d985ec661c63.pdf"}, {"id": "3df7401906ae315e6aef3b4f13126de64b894a54", "title": "Robust learning of discriminative projection for multicategory classification on the Stiefel manifold", "year": 2008, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/067.pdf"}, {"id": "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "title": "Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf"}, {"id": "b463f935ccd91831cb56bff0a27a88c4320f99dd", "title": "Evidence for rostro-caudal functional organization in multiple brain areas related to goal-directed behavior.", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b463/f935ccd91831cb56bff0a27a88c4320f99dd.pdf"}, {"id": "0d8753db30693a000a2b704bc4f8c7209f716d66", "title": "Robust 
Fisher Linear Discriminant Model for Dimensionality Reduction", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.211"}, {"id": "609f8215f0b9546b01aed5d7d12a37d3a72a0c9a", "title": "Employing efficient techniques based on 2D DMWT/FastICA for supervised facial recognition", "year": 2016, "pdf": null}, {"id": "66ec086d1a546939b753671f192d76c06c8b207e", "title": "Non-negative Matrix Factorization on Kernels", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/66ec/086d1a546939b753671f192d76c06c8b207e.pdf"}, {"id": "b4e6c5942ed495c3158ef4bede9b53690b20d0a4", "title": "Eigenspace-based face recognition: a comparative study of different approaches", "year": "2005", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1487580"}, {"id": "578d4ad74818086bb64f182f72e2c8bd31e3d426", "title": "The MR2: A multi-racial, mega-resolution database of facial stimuli.", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf"}, {"id": "7b92d1e53cc87f7a4256695de590098a2f30261e", "title": "From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575487"}, {"id": "befc6e69cc611aab94ca8217424c83f467605dce", "title": "From Local Pixel Structure to Global Image Super-Resolution: A New Face Hallucination Framework", "year": 2011, "pdf": "https://pdfs.semanticscholar.org/c086/bcc73a9cf6687d58d34996736fdb4966ec40.pdf"}, {"id": "add80c86fa2e73637b5630a9e32644784e59379c", "title": "Expanding Training Data for Facial Image Super-Resolution", "year": 2018, "pdf": "https://doi.org/10.1109/TCYB.2017.2655027"}, {"id": "8e6f67ba883169d6103795d7366a3821843ac758", "title": "A Novel Face Recognition Algorithm with Support Vector Machine Classifier", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/8e6f/67ba883169d6103795d7366a3821843ac758.pdf"}, {"id": "6b0b4df3223315553c1b1d911fbe846681ab56c4", "title": "Face photo-sketch recognition using local and global texture descriptors", "year": 2016, "pdf": "https://doi.org/10.1109/EUSIPCO.2016.7760647"}, {"id": "68bf34e383092eb827dd6a61e9b362fcba36a83a", "title": "Multi-view, High-resolution Face Image Analysis", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/68bf/34e383092eb827dd6a61e9b362fcba36a83a.pdf"}, {"id": "633c851ebf625ad7abdda2324e9de093cf623141", "title": "Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727"}, {"id": "5a363ac89f93377bb72af4346957ce36a01090c8", "title": "Face Recognition of Database of Compressed Images using Local Binary Patterns", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6f18/125c7db759d36e6044ddaf7a5e8eece6e233.pdf"}, {"id": "bcd299eb32f17b531fa281cb750a89895cb4feb5", "title": "Computer Vision Research at the Computational Vision Laboratory of the Universidad de Chile", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/bcd2/99eb32f17b531fa281cb750a89895cb4feb5.pdf"}, {"id": "02c38fa9a8ada6040ef21de17daf8d5e5cdc60c7", "title": "Toward an Application of Content-Based Video Indexing to Computer- Assisted Descriptive Video", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CRV.2006.78"}, {"id": "5b446205c2a9d694bed3b972d8dd26119c7b1ac5", "title": "Eye Center Localization Using Adaptive Templates", "year": 2004, "pdf": 
"https://www.researchgate.net/profile/Peter_Eisert/publication/4119544_Eye_Center_Localization_Using_Adaptive_Templates/links/09e4151111b861883a000000.pdf"}, {"id": "b6ad9d1a8038e9b4ade4d5bbb26f03e813d53071", "title": "Feature Extractions for Small Sample Size Classification Problem", "year": 2007, "pdf": "https://doi.org/10.1109/TGRS.2006.885074"}, {"id": "01e460b6cf467f774844d92f1d359d0d77e044fe", "title": "A constructive genetic algorithm for LBP in face recognition", "year": 2017, "pdf": null}, {"id": "8b56e33f33e582f3e473dba573a16b598ed9bcdc", "title": "A new ranking method for principal components analysis and its application to face image analysis", "year": 2010, "pdf": "https://doi.org/10.1016/j.imavis.2009.11.005"}, {"id": "7cee802e083c5e1731ee50e731f23c9b12da7d36", "title": "2^B3^C: 2 Box 3 Crop of Facial Image for Gender Classification with Convolutional Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7cee/802e083c5e1731ee50e731f23c9b12da7d36.pdf"}, {"id": "a6807ee5dec896aed0d76149067ff27f8c5fb2b4", "title": "Design methodology for face detection acceleration", "year": 2013, "pdf": null}, {"id": "4dab3522bbf33f199996069106b514badb4f900a", "title": "Multimodal Biometrics Using Cancelable Feature Fusion", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CW.2014.45"}, {"id": "2450c618cca4cbd9b8cdbdb05bb57d67e63069b1", "title": "A connexionist approach for robust and precise facial feature detection in complex scenes", "year": 2005, "pdf": "http://liris.cnrs.fr/Documents/Liris-6127.pdf"}, {"id": "4c1dfd32f2ecd994a1dd85adebd4ad98cf27117c", "title": "Face recognition based on the combination of histogram features and rough location information of facial parts", "year": 2008, "pdf": null}, {"id": "c2f8eb9b3414c1777440a0adfcc8c0a3c3b646e4", "title": "Embedded face detection implementation", "year": 2013, "pdf": "http://ieeexplore.ieee.org/document/6617164/"}, {"id": "d8e8730231dc0e77f3ad61385f918df3d93bd266", "title": "Efficient face detection method with eye region judgment", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/d8e8/730231dc0e77f3ad61385f918df3d93bd266.pdf"}, {"id": "79584ed8638392e253ffae2d5ec936239d088285", "title": "An approach to face detection and alignment using hough transformation with convolution neural network", "year": 2016, "pdf": null}, {"id": "9d29ad7576f5ba1622aea3ec3eb14d869015a73f", "title": "An In-depth Examination of Local Binary Descriptors in Unconstrained Face Recognition", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977478"}, {"id": "ab69d53372666433738063fd6f3d8a9e88a74b4c", "title": "Face recognition based on geodesic distance approximations between multivariate normal distributions", "year": 2017, "pdf": null}, {"id": "38708f8627ab18d748990b1c32817d5e878c6705", "title": "Computational Science and Its Applications -- ICCSA 2015", "year": "2015", "pdf": "http://doi.org/10.1007/978-3-319-21407-8"}, {"id": "3dda181be266950ba1280b61eb63ac11777029f9", "title": "When Celebrities Endorse Politicians: Analyzing the Behavior of Celebrity Followers in the 2016 U.S. 
Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf"}, {"id": "e82693e9e7b1176ecb48a775cf2548e3d68ffd3a", "title": "Linear versus nonlinear neural modeling for 2-D pattern recognition", "year": 2005, "pdf": "https://doi.org/10.1109/TSMCA.2005.851268"}, {"id": "e71e9cbcd34bc169b2d6d083e925bd7f44693ca0", "title": "Performance Analysis of Face Recognition Algorithms on Korean Face Database", "year": 2007, "pdf": "https://doi.org/10.1142/S0218001407005818"}, {"id": "e6639b09a1fda7ddcf3d6ea9b5a011e3467ce381", "title": "When a never-seen but less-occluded image is better recognized: evidence from same-different matching experiments and a model.", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/e663/9b09a1fda7ddcf3d6ea9b5a011e3467ce381.pdf"}, {"id": "bbdd99189aee252adcb0ae80d094e2858c645a78", "title": "Multi-ethnical Chinese facial characterization and analysis", "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-6018-1"}, {"id": "55ecf12976d0dfff50122ef87b88bd4ae1241305", "title": "Toward a distributed benchmarking tool for biometrics", "year": 2011, "pdf": "https://hal.archives-ouvertes.fr/hal-00993289/document"}, {"id": "2a5efa83ea5c4733757b838b84ba6519f873b826", "title": "A Continuous Learning for Solving a Face Recognition Problem", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2a5e/fa83ea5c4733757b838b84ba6519f873b826.pdf"}, {"id": "91e58c39608c6eb97b314b0c581ddaf7daac075e", "title": "Pixel-wise Ear Detection with Convolutional Encoder-Decoder Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/91e5/8c39608c6eb97b314b0c581ddaf7daac075e.pdf"}, {"id": "8b21c89b436e0aa540f7b9648e4344eb30a6e372", "title": "Robust coding schemes for indexing and retrieval from large face databases", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/c6a9/c92bcc24f08e6fcf6b6291d3690a087ed07d.pdf"}, {"id": "a2c9ddd7e7df99dba7c7b2b522f5c601b341c695", "title": "Eigenspace-based Face Recognition: A comparative study of different hybrid approaches", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/a2c9/ddd7e7df99dba7c7b2b522f5c601b341c695.pdf"}, {"id": "2dab5bddd86b0c1d8dc201c5860b3d5832da87a4", "title": "Interactive Reception Desk with Face Recognition-Based Access Control", "year": 2007, "pdf": "https://doi.org/10.1109/ICDSC.2007.4357517"}, {"id": "0ea2fa4039b6fc733dbc6942642287f141c1121c", "title": "Situation Awareness through Multimodal Biometric Template Security in Real-Time Environments", "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CW.2013.80"}, {"id": "6a552df527057d433b6b68cab7c2b7661132dbcc", "title": "Selection of optimized features and weights on face-iris fusion using distance images", "year": 2015, "pdf": "https://doi.org/10.1016/j.cviu.2015.02.011"}, {"id": "8dc37d4993d2e3ad145ff0959b71ffcfb507e571", "title": "Evaluation Methods in Face Recognition", "year": 2011, "pdf": "https://doi.org/10.1007/978-0-85729-932-1_21"}, {"id": "ee8a75b776f92e797856b3d9a8f2c02421e53ad5", "title": "Biometric hash: high-confidence face recognition", "year": 2006, "pdf": "https://doi.org/10.1109/TCSVT.2006.873780"}, {"id": "6ae02dac51860d9124db22a09a160a6478123b33", "title": "Median ternary pattern (MTP) for face recognition", "year": 2013, "pdf": null}, {"id": "900869f974fda37e74e36f3723217511d76395ac", "title": "Unsupervised Discriminant Projection Analysis for Feature Extr", "year": 2006, "pdf": null}, {"id": "ac2a2c655ad1b3eea3f222b213a774aa25a403c0", "title": "Self-Learning of Feature 
Regions for Image Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/ac2a/2c655ad1b3eea3f222b213a774aa25a403c0.pdf"}, {"id": "0636baf08c741c7d0cadc0f88dc70f6bd023b8ee", "title": "Finding distinctive facial areas for face recognition", "year": 2010, "pdf": "https://doi.org/10.1109/ICARCV.2010.5707381"}, {"id": "95aef348d76fe8d6ac628dd62196646894585ee4", "title": "Real Adaboost feature selection for Face Recognition", "year": 2010, "pdf": null}, {"id": "63389206f5cc81990324ee498eb6c25ffcf39831", "title": "VQ-based face recognition algorithm using code pattern classification and Self-Organizing Maps", "year": 2008, "pdf": null}, {"id": "1fbccb842cf34697c98532d69b044463985f90ee", "title": "Constructing PCA Baseline Algorithms to Reevaluate ICA-Based Face-Recognition Performance", "year": 2007, "pdf": "http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=781393DC492A91E996B002EDC5419A75?doi=10.1.1.500.8611&rep=rep1&type=pdf"}, {"id": "0af10b19c5636ab37609bfc3289fa1b9a40849b6", "title": "Maximum Confidence Hidden Markov Modeling for Face Recognition", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.70715"}, {"id": "7197e4236fe7c24f6b088b16a404128c1bb28a0f", "title": "A hierarchical Bayesian network for face recognition using 2D and 3D facial data", "year": 2015, "pdf": "http://www.cse.nd.edu/~flynn/papers/ChangMMUA.pdf"}, {"id": "07d70e554f7c9d4a22944bc5e0685dfdd813f33a", "title": "A spatiotemporal feature-based approach for facial expression recognition from depth video", "year": 2015, "pdf": "https://doi.org/10.1117/12.2197074"}, {"id": "06d028bd761ad6f29e9f1835d6686d9880706438", "title": "Encoding of facial images into illumination-invariant spike trains", "year": 2012, "pdf": null}, {"id": "4d0c95aa38dc191db17788f6866726e7ceafe3d3", "title": "Locality-constrained framework for face alignment", "year": "2018", "pdf": "http://doi.org/10.1007/s11704-018-6617-z"}, {"id": "8966eb0853cb350aa49e9769013fa3c942d35206", "title": "Robust spike-and-slab deep Boltzmann machines for face denoising", "year": "2018", "pdf": "http://doi.org/10.1007/s00521-018-3866-6"}, {"id": "f5bd11c5c5a455df04b171e37acd1fbdbf3dacd5", "title": "African American and Caucasian males ' evaluation of racialized female facial averages", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/8f15/c3a426d307dd1e72f7feab1e671d20fb1adb.pdf"}, {"id": "d962dc575bcbb054a76385b7f2d446e83f17d1c3", "title": "Illumination compensation method for local matching Gabor face classifier", "year": 2010, "pdf": null}, {"id": "14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6", "title": "Multi-subregion based correlation filter bank for robust face recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/4b76/694ff2efb302074adf1ba6052d643177abd1.pdf"}, {"id": "64d795ac6b90f1b7dd227e7e8f24a614d18aecfc", "title": "Integrating monomodal biometric matchers through logistic regression rank aggregation approach", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AIPR.2008.4906455"}, {"id": "9ea73660fccc4da51c7bc6eb6eedabcce7b5cead", "title": "Talking head detection by likelihood-ratio test", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/9ea7/3660fccc4da51c7bc6eb6eedabcce7b5cead.pdf"}, {"id": "bd766fb148c419673778cd026a8c4e8c5e4696d2", "title": "Evaluation of wavelet based linear subspace techniques for face recognition", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/bd76/6fb148c419673778cd026a8c4e8c5e4696d2.pdf"}, {"id": "f21a8e372fa12d87fec77d3297afe1e566e229a7", 
"title": "Nonnegative tensor factorization as an alternative Csiszar\u2013Tusnady procedure: algorithms, convergence, probabilistic interpretations and novel probabilistic tensor latent variable analysis algorithms", "year": 2010, "pdf": "https://doi.org/10.1007/s10618-010-0196-4"}, {"id": "35a2b1a8ad8c67f3bb0961f690f407124e61c9ec", "title": "Gender classification in uncontrolled settings using additive logistic models", "year": 2009, "pdf": "https://doi.org/10.1109/ICIP.2009.5414004"}, {"id": "89d4f8bde129198f41f454b2fd9bf29fce809fdb", "title": "Face detection using discriminating feature analysis and Support Vector Machine", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/89d4/f8bde129198f41f454b2fd9bf29fce809fdb.pdf"}, {"id": "fff0a848b57361e1e99548c95fbc2ec9ae00ce32", "title": "A Robust Approach for Gender Recognition Using Deep Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8494194"}, {"id": "fde0180735699ea31f6c001c71eae507848b190f", "title": "Face Detection and Sex Identification from Color Images using AdaBoost with SVM based Component Classifier", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/fde0/180735699ea31f6c001c71eae507848b190f.pdf"}, {"id": "307ed58717ecc925bdf3f56da4560b44ea6bc312", "title": "Designing A.ne Transformations based Face Recognition Algorithms", "year": 2005, "pdf": "http://www.csee.usf.edu/~pkmohant/papers/frgc_usf_cvpr05.pdf"}, {"id": "1aab5662f57ceb813fbe5233f8b6fb5868f5d9fe", "title": "Sources of interference in recognition testing.", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/1aab/5662f57ceb813fbe5233f8b6fb5868f5d9fe.pdf"}, {"id": "93a451b5efa21887dae43eb73b4793ae3d184138", "title": "A Study on the Effectiveness of Different Patch Size and Shape for Eyes and Mouth Detection", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/93a4/51b5efa21887dae43eb73b4793ae3d184138.pdf"}, {"id": "dd2d952e5dfad764649a1bd9adeaa429f0998c12", "title": "A Combined Approach to Feature Extraction for Mouth Characterization and Tracking", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/dd2d/952e5dfad764649a1bd9adeaa429f0998c12.pdf"}, {"id": "95fb31beaa745557fa33ec4f670ab4e130ff5bf4", "title": "Speeding up spatial approximation search in metric spaces", "year": 2009, "pdf": "http://www.dcc.uchile.cl/~raparede/publ/09jeaiAESA.pdf"}, {"id": "cb2470aade8e5630dcad5e479ab220db94ecbf91", "title": "Exploring Facial Differences in European Countries Boundary by Fine-Tuned Neural Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018"}, {"id": "29f5ed1693511c636822b42db76639911bdeeda4", "title": "Local maximal margin discriminant embedding for face recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/29f5/ed1693511c636822b42db76639911bdeeda4.pdf"}, {"id": "4f507eb9823b13f1f782d25d6f0592924c087562", "title": "Holistic processing as measured in the composite task does not always go with right hemisphere processing in face perception", "year": 2016, "pdf": "https://doi.org/10.1016/j.neucom.2015.12.018"}, {"id": "76b9fe32d763e9abd75b427df413706c4170b95c", "title": "Gabor feature based robust representation and classification for face recognition with Gabor occlusion dictionary", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/76b9/fe32d763e9abd75b427df413706c4170b95c.pdf"}, {"id": "ac0131ab219891562cad7f5d83ec910216e3a94f", "title": "Novel multimodal template generation algorithm", "year": 2013, "pdf": 
"http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2013.6622228"}, {"id": "7a870f60eebb2c7417140f8ab48028a0f57f7b42", "title": "A new extension of kernel feature and its application for visual recognition", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/7a87/0f60eebb2c7417140f8ab48028a0f57f7b42.pdf"}, {"id": "70769def1284fe88fd57a477cde8a9c9a3dff13f", "title": "Adaptive feature representation for robust face recognition using context-aware approach", "year": 2007, "pdf": "https://doi.org/10.1016/j.neucom.2006.10.036"}, {"id": "3f4b8fe5edfac918c1c74317242b2d91346d5fb6", "title": "Adaptive discriminant analysis for face recognition from single sample per person", "year": 2011, "pdf": "https://pdfs.semanticscholar.org/3f4b/8fe5edfac918c1c74317242b2d91346d5fb6.pdf"}, {"id": "fbc3680f14f057a62420e5771250465768a75557", "title": "Facial features extraction by accelerated implementation of circular hough transform and appearance evaluation", "year": 2015, "pdf": "https://doi.org/10.1109/FCV.2015.7103709"}, {"id": "07d7889bf4b7cb19d4a8d09996311cbec2a3da33", "title": "Facial image super resolution using sparse representation for improving face recognition in surveillance monitoring", "year": 2016, "pdf": "https://doi.org/10.1109/SIU.2016.7495771"}, {"id": "f3a8e2eb5190cadd037b722e7c1190c2d1148f10", "title": "Local Line Derivative Pattern for face recognition", "year": 2012, "pdf": null}, {"id": "110a6a5304c6773773a0cd217f3e9d540b2f698d", "title": "Gender Recognition Based On Combining Facial and Hair Features", "year": 2013, "pdf": "http://doi.acm.org/10.1145/2536853.2536933"}, {"id": "7238e8f34c662af74b81de80fa6c01838a06b349", "title": "Combining SVMS for face class modeling", "year": 2005, "pdf": "https://infoscience.epfl.ch/record/87261/files/Meynet2005_1284.pdf"}, {"id": "4eb0b82b294f601510cd965adcf0e8c386cbaf22", "title": "Face Detection for Augmented Reality Application Using Boosting-based Techniques", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4eb0/b82b294f601510cd965adcf0e8c386cbaf22.pdf"}, {"id": "0c852df66ccd97f42317b8c021424ca6f93721b7", "title": "Modeling face appearance with nonlinear independent component analysis", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301626"}, {"id": "c924137ca87e8b4e1557465405744f8b639b16fc", "title": "Seeding Deep Learning using Wireless Localization", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.10242.pdf"}, {"id": "a716972c8d3e6c25343c07c01361375bcb34118b", "title": "An Incremental Two-Dimensional Principal Component Analysis for Image Compression and Recognition", "year": 2016, "pdf": "https://doi.org/10.1109/SITIS.2016.121"}, {"id": "666ee08d9486645a3a0a9e5354c248dccd9ba0c0", "title": "Ensemble learning for independent component analysis", "year": "2006", "pdf": "http://doi.org/10.1016/j.patcog.2005.06.018"}, {"id": "4a5014c23e2adb640f4b07b9b47ca2f2a5d427e6", "title": "On the Estimation of Face Recognition System Performance using Image Variabil- ity Information", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/4a50/14c23e2adb640f4b07b9b47ca2f2a5d427e6.pdf"}, {"id": "c9ea71631540dfc13079338fb534c6eb78198d4e", "title": "Automatic Visual Integration: Defragmenting the Face", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/c9ea/71631540dfc13079338fb534c6eb78198d4e.pdf"}, {"id": "1b4b3d0ce900996a6da8928e16370e21d15ed83e", "title": "A Review of Performance Evaluation on 2D Face Databases", "year": 2017, "pdf": "https://doi.org/10.1109/BigDataService.2017.38"}, {"id": 
"8f077eeeb9678a31e77a17a5c28c36699cf13f83", "title": "Gender Classification of Faces Using Adaboost", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/8f07/7eeeb9678a31e77a17a5c28c36699cf13f83.pdf"}, {"id": "5998a015a5f1b72761256759808f9529a7717058", "title": "A deep transfer learning approach to fine-tuning facial recognition models", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398162"}, {"id": "bc955487a0b8d2fae3f2f44320389a12ae28f0f5", "title": "Face Sketch–Photo Synthesis and Retrieval Using Sparse Representation", "year": 2012, "pdf": null}, {"id": "46a29a5026142c91e5655454aa2c2f122561db7f", "title": "Margin Emphasized Metric Learning and its application to Gabor feature based face recognition", "year": 2011, "pdf": "https://doi.org/10.1109/FG.2011.5771461"}, {"id": "bf39babab5648ff64cc4b79bfec96e8c6c93b812", "title": "The Impact of Disappointment in Decision Making: Inter-Individual Differences and Electrical Neuroimaging", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/b3b0/4c872d83a55ac5787829edcae98c6a27c64e.pdf"}, {"id": "fd01b62a8bb76798e93cf927e3aa823961af81fd", "title": "Region-based eigentransformation for face image hallucination", "year": 2009, "pdf": "https://doi.org/10.1109/ISCAS.2009.5118032"}, {"id": "99127b39f7671607b1fc9d862a9af0ce9beedaa4", "title": "Hallucination space relationship learning to improve very low resolution face recognition", "year": 2015, "pdf": "https://doi.org/10.1109/ACPR.2015.7486455"}, {"id": "7166c7a4d1e90876899d61e3fa4e30beb5f1ec9e", "title": "State of the Art in 3 D Face Recognition RR 05 126 D RAFT", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/7166/c7a4d1e90876899d61e3fa4e30beb5f1ec9e.pdf"}, {"id": "7cb3eac464ad6607819f45e920c4784ad631e0b9", "title": "Boosting performance for 2D Linear Discriminant Analysis via regression", "year": 2008, "pdf": "https://doi.org/10.1109/ICPR.2008.4761898"}, {"id": "4850e40b0e69e30723cb027fdc4a38ee1322589b", "title": "Detec\u00e7\u0303ao de Landmarks Faciais Usando SVM", "year": "2011", "pdf": "https://pdfs.semanticscholar.org/4850/e40b0e69e30723cb027fdc4a38ee1322589b.pdf"}, {"id": "15dc50e3bd1063c3760e1c17177a0e898175c61e", "title": "Visual routines for eye location using learning and evolution", "year": 2000, "pdf": "https://doi.org/10.1109/4235.843496"}, {"id": "30ff70a3afea6b6b46bde883ca1ade0e932bbe71", "title": "Image Parsing: Unifying Segmentation, Detection, and Recognition", "year": 2003, "pdf": "http://pages.ucsd.edu/~ztu/publication/IJCV_parsing.pdf"}, {"id": "084f1a6c62a3464b1a9b745fee40af2895920301", "title": "Capitalize on dimensionality increasing techniques for improving face recognition grand challenge performance", "year": 2006, "pdf": "https://frvp.njit.edu/images/new-slider/TPAMI06-DIF.pdf"}, {"id": "63fac08993df90719a52fe3f1c928a03bddc333d", "title": "Nonlinear DCT Discriminant Feature Extraction with Generalized KDCV for Face Recognition", "year": 2008, "pdf": null}, {"id": "6b18628cc8829c3bf851ea3ee3bcff8543391819", "title": "Face recognition based on subset selection via metric learning on manifold", "year": 2015, "pdf": "http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20151221082702_2.pdf"}, {"id": "8016233d289f4d7da40f3fcae550b29dce2acfc1", "title": "C3 Effective features inspired from Ventral and dorsal stream of visual cortex for view independent face recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c365/a850a864a8d811395722c1c2ec286eb8002b.pdf"}, {"id": "7a99b250f070e5adba3ed9483ea7ea47fc71ac8c", 
"title": "Efficient face recognition using wavelet-based generalized neural network", "year": 2013, "pdf": "https://doi.org/10.1016/j.sigpro.2012.09.012"}, {"id": "d3ae825a19b7a0b1645412368403814dbebcd113", "title": "Application of two-dimensional canonical correlation analysis for face image processing and recognition", "year": 2010, "pdf": null}, {"id": "bde8271f6f9afb58008c3409cee9b1f8a81a0e3c", "title": "Aggregation of classifiers based on image transformations in biometric face recognition", "year": 2007, "pdf": "https://doi.org/10.1007/s00138-007-0088-9"}, {"id": "3399021c2cd7bc4f46494c5aa38bac82ef290e7b", "title": "Robust face recognition after plastic surgery using region-based approaches", "year": 2015, "pdf": "https://doi.org/10.1016/j.patcog.2014.10.004"}, {"id": "36f039e39efde3558531b99d85cd9e3ab7d396b3", "title": "Efficiency of Recognition Methods for Single Sample per Person Based Face Recognition", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/6d42/4349f2a2eeb4a74c76b737e7a7e0693f0c5d.pdf"}, {"id": "686dfa9c12ff5bf0a3b1707c6637dc8f4a606fa0", "title": "Gabor Feature Based Classification Using 2D Linear Discriminant Analysis for Face Recognition", "year": 2005, "pdf": "https://doi.org/10.1007/11527923_97"}, {"id": "0cf07a503b80f630c6774efb126df282a7c93f1d", "title": "Advances in Computational Intelligence", "year": "2017", "pdf": "http://doi.org/10.1007/978-3-319-59153-7"}, {"id": "537fbb3d5216333bcc91730f3ee61ff71f710e37", "title": "An Improved Face Recognition Algorithm Using Quantized DCT Coefficients", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SITIS.2011.62"}, {"id": "abf0e3ce602287875a2d1ddf1bb4ab76eceab977", "title": "Face recognition using generalized self organising map based on PCNN features", "year": 2015, "pdf": null}, {"id": "2d20ccac97cb11460097c10c90feebac3887f92d", "title": "A data association approach to detect and organize people in personal photo collections", "year": 2011, "pdf": "https://www.researchgate.net/profile/Marco_Morana2/publication/230603446_A_data_association_approach_to_detect_and_organize_people_in_personal_photo_collections/links/02e7e52cdabae3881b000000.pdf"}, {"id": "6d994076a6ef3b6e74e2a0149af759e48b71f9a0", "title": "Could dynamic attractors explain associative prosopagnosia?", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/6d99/4076a6ef3b6e74e2a0149af759e48b71f9a0.pdf"}, {"id": "d7b6bbb94ac20f5e75893f140ef7e207db7cd483", "title": "griffith . edu . 
au Face Recognition across Pose : A Review", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/d7b6/bbb94ac20f5e75893f140ef7e207db7cd483.pdf"}, {"id": "b6aaaf6290ba0ca13be61d122907617f1ea86315", "title": "Embedded Face Recognition Using Cascaded Structures PROEFSCHRIFT", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/b6aa/af6290ba0ca13be61d122907617f1ea86315.pdf"}, {"id": "22032c78ff1e89b45a815f5caca35756d935dd5e", "title": "Face recognition in JPEG and JPEG2000 compressed domain", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2203/2c78ff1e89b45a815f5caca35756d935dd5e.pdf"}, {"id": "d3afd1ee9bd0402775227505ea506cc52a0c18f9", "title": "Enhancing user authentication of online credit card payment using face image comparison with MPEG7-edge histogram descriptor", "year": 2015, "pdf": null}, {"id": "829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a", "title": "A Robust Rotation Invariant Multiview Face Detection in Erratic Illumination Condition", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/bbf4/f0ce0838c8eec048e3a9b212053fd98dde5a.pdf"}, {"id": "b542f58c1c9abe627638e9267040b397b6668374", "title": "False alarm rate: a critical performance measure for face recognition", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301529"}, {"id": "5f7ae1e13c00bb58856374b9888f1ddf73c6865e", "title": "Gender classification using face image and voice", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5f7a/e1e13c00bb58856374b9888f1ddf73c6865e.pdf"}, {"id": "2cf40b684c0cb789ed8d9f8eee0576f3b3fec98e", "title": "Robust Selectivity for Faces in the Human Amygdala in the Absence of Expressions", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/2cf4/0b684c0cb789ed8d9f8eee0576f3b3fec98e.pdf"}, {"id": "3333b35ddb698be76dd27bffad131c1daa694bf2", "title": "Comparing Robustness of Two-Dimensional PCA and Eigenfaces for Face Recognition", "year": "2004", "pdf": "https://pdfs.semanticscholar.org/3333/b35ddb698be76dd27bffad131c1daa694bf2.pdf"}, {"id": "5e6ba16cddd1797853d8898de52c1f1f44a73279", "title": "Face Identification with Second-Order Pooling", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/5e6b/a16cddd1797853d8898de52c1f1f44a73279.pdf"}, {"id": "f4140100f311ba899b8ba661de17600e91cbb129", "title": "An Active Vision System for Multitarget Surveillance in Dynamic Environments", "year": 2007, "pdf": "https://doi.org/10.1109/TSMCB.2006.883423"}, {"id": "5bac3437dcdfe482b406800f38c0b9a5ca9223ee", "title": "Multiresolution based Kernel Fisher Discriminant Model for Face Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ITNG.2007.131"}, {"id": "a78cf9ee6571b99c74e330dfe8d357e51fd8cd28", "title": "Random subspace support vector machine ensemble for reliable face recognition", "year": 2014, "pdf": "https://doi.org/10.1504/IJBM.2014.059636"}, {"id": "b27322d8b0984ce3b756f2b7c43ea698d635f2ef", "title": "(WKSP) On the Potential of Data Extraction by Detecting Unaware Facial Recognition with Brain-Computer Interfaces", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457703"}, {"id": "c4cb04972f42ae2bdf26cff0c84959dc0b5178f1", "title": "The other-race effect for face perception: an event-related potential study", "year": 2007, "pdf": null}, {"id": "745b77016f972288925602d0d67bbba7d2c0fee9", "title": "Score level fusion scheme based on adaptive local Gabor features for face-iris-fingerprint multimodal biometric", "year": 2014, "pdf": "https://doi.org/10.1117/1.JEI.23.3.033019"}, {"id": 
"c3561494f98929d8019b0b0e9481b2e79424f60c", "title": "Object Localization Based on Global Structure Constraint Model and Particle Swarm Optimization", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CSE.2008.28"}, {"id": "6c62c592d9d3090b446cf76147468a6190431b3d", "title": "Gabor feature based classification using statistical models for face recognition", "year": "2010", "pdf": "http://doi.org/10.1016/j.procs.2010.11.011"}, {"id": "7c23bdcee91be4426c77f580e79462ffe7f52df0", "title": "Face recognition methods for multimodal interface", "year": 2012, "pdf": "https://doi.org/10.1109/WMNC.2012.6416164"}, {"id": "d590ca357910532cc62eeacc56af8f86b9fe642b", "title": "Metric Spaces Library", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/d590/ca357910532cc62eeacc56af8f86b9fe642b.pdf"}, {"id": "3c8aa33ccff8f959df28e4e883867af32e7b4b78", "title": "The impact of task relevance and degree of distraction on stimulus processing", "year": 2013, "pdf": "https://opus.bibliothek.uni-wuerzburg.de/opus4-wuerzburg/frontdoor/deliver/index/docId/9727/file/Biehl_1471-2202-14-107.pdf"}, {"id": "7f3a73babe733520112c0199ff8d26ddfc7038a0", "title": "Robust Face Identification with Small Sample Sizes using Bag of Words and Histogram of Oriented Gradients", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/7f3a/73babe733520112c0199ff8d26ddfc7038a0.pdf"}, {"id": "c29e33fbd078d9a8ab7adbc74b03d4f830714cd0", "title": "3D shape constraint for facial feature localization using probabilistic-like output", "year": 2004, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301548"}, {"id": "ee6e5a67c0504f77ec96121106ae88488da0536b", "title": "A new method for combined face detection and identification using interest point descriptors", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771452"}, {"id": "c4c41bf777a1dc0ced43ee64bee683be092e297f", "title": "Face identification using linear regression", "year": "2009", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5414539"}, {"id": "a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4", "title": "Low-complexity face recognition using contour-based binary descriptor", "year": 2017, "pdf": "https://doi.org/10.1049/iet-ipr.2016.1074"}, {"id": "4d0ef449de476631a8d107c8ec225628a67c87f9", "title": "Face system evaluation toolkit: Recognition is harder than it seems", "year": 2010, "pdf": "http://www.wjscheirer.com/papers/wjs_btas2010b_photohead.pdf"}, {"id": "75fd35b2048407f93d7bada8edbb7628d5a952bc", "title": "Multilinear locality preserving canonical correlation analysis for face recognition", "year": 2011, "pdf": "https://doi.org/10.1109/ICICS.2011.6174289"}, {"id": "1ac8cb17c278fde95d76cdf066720195d115d265", "title": "Human face recognition based on multidimensional PCA and extreme learning machine", "year": 2011, "pdf": "https://doi.org/10.1016/j.patcog.2011.03.013"}, {"id": "35047c9667a4c5160f9f40f16b455839e63662ff", "title": "Color Object Recognition Based On Clifford Fourier Transform", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/5fce/66e13c7534c82525e614c61b28286fb9f673.pdf"}, {"id": "b083f43b0a1d4174f3718582f9904db8b6275024", "title": "The Dorsal Attention Network Reflects Both Encoding Load and Top\u2013down Control during Working Memory", "year": "2018", "pdf": "http://doi.org/10.1162/jocn_a_01195"}, {"id": "668e8a5f2170c9ec921ea55b1d58d87223a04b36", "title": "Selecting training sets for support vector machines: a review", "year": 2017, "pdf": null}, {"id": 
"7b4e42f4dac397e5214d957f75832e58d6f28299", "title": "Combining null space-based Gabor features for face recognition", "year": 2004, "pdf": "http://www.sinobiometrics.com/publications/wfan/ICPR2004.pdf"}, {"id": "ff62415c8685d6beb698960b7db3498b61ead423", "title": "Improving kernel Fisher discriminant analysis for face recognition", "year": 2004, "pdf": null}, {"id": "ce073cb70eec80d87c9e07a4ec2d4162d91e23a6", "title": "Positive Definite Matrices: Data Representation and Applications to Computer Vision", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ce07/3cb70eec80d87c9e07a4ec2d4162d91e23a6.pdf"}, {"id": "2d28fdb8e067b15c70a02729f49b23d75551a300", "title": "Gabor Wavelet Feature Based Face Recognition Using the Fractional Power Polynomial Kernel Fisher Discriminant Model", "year": 2007, "pdf": null}, {"id": "aa1e91a7f166d9a19b228d66f74e7de5b4448a0e", "title": "Seeing Blobs as Faces or Letters: Modeling Effects on Discrimination", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/aa1e/91a7f166d9a19b228d66f74e7de5b4448a0e.pdf"}, {"id": "9e194413f10ea3385f063de87e15287072ac357b", "title": "An a-contrario Approach for Face Matching", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/9e19/4413f10ea3385f063de87e15287072ac357b.pdf"}, {"id": "65696f1beb82be12e7e358bb0e5109dedf004783", "title": "Multi-view Discriminative Manifold Embedding for Pattern Classification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6569/6f1beb82be12e7e358bb0e5109dedf004783.pdf"}, {"id": "d4b88be6ce77164f5eea1ed2b16b985c0670463a", "title": "A Survey of Different 3D Face Reconstruction Methods", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d4b8/8be6ce77164f5eea1ed2b16b985c0670463a.pdf"}, {"id": "6d9164bc6a483ccb55284cddd9024bb8831dcc35", "title": "Complexity Reduced Face Detection Using Probability-Based Face Mask Prefiltering and Pixel-Based Hierarchical-Feature Adaboosting", "year": 2011, "pdf": "https://doi.org/10.1109/LSP.2011.2146772"}, {"id": "4338ccdbc0407d0ee8fda202c251163e7be49c90", "title": "Cost Component Analysis", "year": 2003, "pdf": "https://doi.org/10.1142/S0129065703001558"}, {"id": "5ce63df36f893d2ea13b58a20e58ebc0854ef4f8", "title": "A Kernel Two-Phase Test Sample Sparse Representation for Face Recognition", "year": 2016, "pdf": "https://doi.org/10.1142/S0218001416560012"}, {"id": "d6bcc9f24176714fc72dae4cad88c12d9825a70d", "title": "Sparse Representation Shape Models", "year": 2012, "pdf": "https://doi.org/10.1007/s10851-012-0394-3"}, {"id": "cb254fcaa1219f829793ac4a0a1f4c858d853c04", "title": "Smart pass automation system", "year": 2016, "pdf": "https://doi.org/10.1109/SIU.2016.7495718"}, {"id": "964e43f4983a42ef3790c265bdce42c1fce56d79", "title": "A Virtual Environment Tool for Benchmarking Face Analysis Systems", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ea01/9ddae0b7da579491392a0131945fefa132cb.pdf"}, {"id": "df30fd76934092a4aacb850bf5ae5520b22083ef", "title": "A comparative evaluation of iris and ocular recognition methods on challenging ocular images", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117500"}, {"id": "224a88f8b4afe8e6dfe938a8baef491c5dabd85c", "title": "Weighted linear embedding: utilizing local and nonlocal information sufficiently", "year": 2011, "pdf": "https://doi.org/10.1007/s00521-011-0528-3"}, {"id": "3eafadfd496f63df4eabbde15fe1add55d82ee53", "title": "Matching Pursuit Filters Applied To Face Identification - Image Processing, IEEE Transactions on", "year": 1998, "pdf": 
"http://pdfs.semanticscholar.org/3eaf/adfd496f63df4eabbde15fe1add55d82ee53.pdf"}, {"id": "bb34bcf28021a658ce89d65d229df76d4dc620c6", "title": "Computer Models for Facial Beauty Analysis", "year": 2016, "pdf": "https://doi.org/10.1007/978-3-319-32598-9"}, {"id": "6163381244823241373f6741a282f2c4a868b59c", "title": "Multimodal biometrics for identity documents (MBioID).", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/6163/381244823241373f6741a282f2c4a868b59c.pdf"}, {"id": "6075c07ecb29d551ffa474c3eca45f2da5fd5007", "title": "Shallow convolutional neural network for eyeglasses detection in facial images", "year": 2017, "pdf": null}, {"id": "b4a16fe1469f2048d031db626b7cbc5bec7f4055", "title": "Online Facial Caricature Generator", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b4a1/6fe1469f2048d031db626b7cbc5bec7f4055.pdf"}, {"id": "8c98f67bb17c94d4d7392dba0774e2e931451db6", "title": "Gender recognition from facial images: two or three dimensions?", "year": 2016, "pdf": null}, {"id": "0dca6aa1c0143aa190973fb2256c16d700992473", "title": "An introduction to the good, the bad, & the ugly face recognition challenge problem", "year": 2011, "pdf": "http://www.cs.colostate.edu/~draper/papers/phillips_fg11.pdf"}, {"id": "2921719b57544cfe5d0a1614d5ae81710ba804fa", "title": "Face Recognition Enhancement Based on Image File Formats and Wavelet De - noising", "year": "", "pdf": "http://pdfs.semanticscholar.org/2921/719b57544cfe5d0a1614d5ae81710ba804fa.pdf"}, {"id": "f442a2f2749f921849e22f37e0480ac04a3c3fec", "title": "Critical Features for Face Recognition in Humans and Machines", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f442/a2f2749f921849e22f37e0480ac04a3c3fec.pdf"}, {"id": "3c1416dd18bededa58d3ac6d2e1f3658ef6cf961", "title": "Active-vision system for multi-target surveillance", "year": 2005, "pdf": null}, {"id": "ffd78f26ce4abf3df539b0f275f100000123a6c1", "title": "Comparative analysis of simple facial features extractors", "year": 2007, "pdf": "https://doi.org/10.1007/s11554-007-0030-4"}, {"id": "d8119108c849aacbaff404d7585ef583e09fb894", "title": "Gender Classification from Face Images Based on Gradient Directional Pattern (GDP)", "year": 2015, "pdf": "https://doi.org/10.1007/978-3-319-21407-8_18"}, {"id": "1d122efc1469e96d71497f0931f502fa47ae344d", "title": "Have We Met Before? 
Using Consumer-Grade Brain-Computer Interfaces to Detect Unaware Facial Recognition", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3180661"}, {"id": "01122cf389ad64c53396fc20f347f218cbfd96c9", "title": "A Simple High Accuracy Approach for Face Recognition", "year": 2007, "pdf": "http://startrinity.com/VideoRecognition/Resources/Simple%20high%20accuracy%20approach%20to%20face%20recognition.pdf"}, {"id": "5b73bc1660b7eef0c12694db935854dba0829f9e", "title": "A Probabilistic Model for Face Transformation with Application to Person Identification", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/5b73/bc1660b7eef0c12694db935854dba0829f9e.pdf"}, {"id": "fd1b917476b114919de0ae1b6a4b96a52a410c20", "title": "A Memory Based Face Recognition Method", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/fd1b/917476b114919de0ae1b6a4b96a52a410c20.pdf"}, {"id": "5437b5cfb0f8eda908559a16e7ed7d7b64be641b", "title": "Face recognition by stepwise nonparametric margin maximum criterion", "year": 2005, "pdf": "http://www.cs.fudan.edu.cn/mcwil/~xpqiu/Papers/ICCV05_SNMMC.pdf"}, {"id": "02cfef55937103669a40710404ab5f0afd4498e8", "title": "Face Verification Competition on the XM2VTS Database", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/02cf/ef55937103669a40710404ab5f0afd4498e8.pdf"}, {"id": "3a564db6701cc64a4bf336e6012ada0952fed02d", "title": "Comparative Study of Diverse Face Recognition Approaches along with Intrinsic Worth and Recognition Rate", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2643/a611f10ce8acfef41e1e49d1be04d5d34190.pdf"}, {"id": "d9984fac91cb4f469cf36f140b6c8c07c45afe6f", "title": "Face Recognition Using LAPP Algorithm", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/d998/4fac91cb4f469cf36f140b6c8c07c45afe6f.pdf"}, {"id": "5efbb135fd3a49de6ca1b69ef583d1bcfe761043", "title": "Micha \u0142 KAWULOK * APPLICATION OF SUPPORT VECTOR MACHINES IN AUTOMATIC HUMAN FACE RECOGNITION", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/5efb/b135fd3a49de6ca1b69ef583d1bcfe761043.pdf"}, {"id": "d38c564429502f0efece11e212bb1be03f05495d", "title": "Tracking Deceased-Related Thinking with Neural Pattern Decoding of a Cortical-Basal Ganglia Circuit.", "year": "2017", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/28730182"}, {"id": "3076adf64a28eea92308541978f8ed6907fe2672", "title": "The functional architecture for face-processing expertise: FMRI evidence of the developmental trajectory of the core and the extended face systems.", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/31b4/6e4c6b487fd36d364a64582ce4ac809ddcd1.pdf"}, {"id": "18c72175ddbb7d5956d180b65a96005c100f6014", "title": "From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf"}, {"id": "6d66c98009018ac1512047e6bdfb525c35683b16", "title": "Face Recognition Based on Fitting a 3D Morphable Model", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/6d66/c98009018ac1512047e6bdfb525c35683b16.pdf"}, {"id": "4a763a6e161d372f17fa591c35932c4f62e3e12b", "title": "Facial feature extraction by a cascade of model-based algorithms", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/4a76/3a6e161d372f17fa591c35932c4f62e3e12b.pdf"}, {"id": "5367610430dc0380dfbe8344e08537267875968c", "title": "Tracking 3D Surfaces Using Multiple Cameras: A Probabilistic Approach", "year": 2010, "pdf": 
"http://pdfs.semanticscholar.org/5367/610430dc0380dfbe8344e08537267875968c.pdf"}, {"id": "82b8a0c3e8f46ab0af5179a9d9a6b137405096d3", "title": "Generalized Low-Rank Approximations of Matrices Revisited", "year": 2010, "pdf": "https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tnn10.pdf"}, {"id": "5dc056fe911a3e34a932513abe637076250d96da", "title": "Real-time facial feature detection using conditional regression forests", "year": 2012, "pdf": "http://www.vision.ee.ethz.ch/~gfanelli/pubs/cvpr12.pdf"}, {"id": "e506788b911aa7f55f44e047b79a242334167d5a", "title": "Towards a face recognition method based on uncorrelated discriminant sparse preserving projection", "year": "2015", "pdf": "http://doi.org/10.1007/s11042-015-2882-0"}, {"id": "a65301ec723dfac73c1e884d26dedeb4de309429", "title": "Incremental generalized low rank approximation of matrices for visual learning and recognition", "year": 2015, "pdf": null}, {"id": "1182323392b11a3de72b403c1ba3fba34f547faf", "title": "Measuring face familiarity and its application to face recognition", "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6163042"}, {"id": "3ce0f48106fd872e2c2327596c2677c4680444a2", "title": "Use of Vertical Face Profiles for Text Dependent Audio-Visual Biometric Person Authentication", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/3ce0/f48106fd872e2c2327596c2677c4680444a2.pdf"}, {"id": "35ccf703df2dd37fc857ab9b438e18c9a65ba5b9", "title": "Two-step supervised confidence measure for automatic face recognition", "year": 2014, "pdf": "https://doi.org/10.1109/MLSP.2014.6958883"}, {"id": "30d5bbfb35a0d534c39ba709a3c625e9965d2985", "title": "Assessing face image quality for smartphone based face recognition system", "year": 2017, "pdf": "https://doi.org/10.1109/IWBF.2017.7935089"}, {"id": "b755505bdd5af078e06427d34b6ac2530ba69b12", "title": "NFRAD: Near-Infrared Face Recognition at a Distance", "year": 2011, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf"}, {"id": "50c5cd1f0af228beb5e19efc897e691c1493e509", "title": "Discriminative local binary pattern", "year": "2016", "pdf": "http://doi.org/10.1007/s00138-016-0780-8"}, {"id": "0b32cd6dff1561889652450cac5c1abb9833557d", "title": "Face Verification in Polar Frequency Domain: A Biologically Motivated Approach", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/0b32/cd6dff1561889652450cac5c1abb9833557d.pdf"}, {"id": "217d22a63b45bb5960c614ede827fb5c532e0add", "title": "Neural mechanisms of concurrent stimulus processing in dual tasks", "year": 2009, "pdf": "https://doi.org/10.1016/j.neuroimage.2009.06.064"}, {"id": "abb4b8f9df14f7b15aa43920d0329eccada33b97", "title": "LBP Yard\u0131m\u0131yla Go\u0308ru\u0308ntu\u0308deki Kis\u0327inin Yas\u0327\u0131n\u0131n Bulunmas\u0131", "year": "2011", "pdf": "https://pdfs.semanticscholar.org/abb4/b8f9df14f7b15aa43920d0329eccada33b97.pdf"}, {"id": "2a0a335d4b2183b03ea8dfc27e31a1891771c4e6", "title": "Glasses detection by boosting simple wavelet features", "year": "2004", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1334110"}, {"id": "6d10c110adaa381084c0b8ed2fd391d1fcd84a03", "title": "Feature Extraction Based on Equalized ULBP for Face Recognition", "year": 2012, "pdf": null}, {"id": "f6488c7741eaddf6cbbd0b0728c48fd8ee1c00a4", "title": "Super-Resolution of Face Images Using Kernel PCA-Based Prior", "year": 2007, "pdf": "http://www.ee.iitm.ac.in/~raju/journals/j29.pdf"}, {"id": 
"eec466317c83e8093a32b978e753c3fc8f21d21b", "title": "Performance Characterization in Computer Vision A Tutorial", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/eec4/66317c83e8093a32b978e753c3fc8f21d21b.pdf"}, {"id": "e5c22faf19bd7ab529ec7c39e4f46e8116211679", "title": "Hallucinating Face in the DCT Domain", "year": 2011, "pdf": "https://doi.org/10.1109/TIP.2011.2142001"}, {"id": "bdbded88916307e282a22dadb7954a4942300908", "title": "Lean histogram of oriented gradients features for effective eye detection", "year": 2015, "pdf": "https://doi.org/10.1117/1.JEI.24.6.063007"}, {"id": "120b9c271c3a4ea0ad12bbc71054664d4d460bc3", "title": "Face Recognition against Mouth Shape Variations", "year": 2015, "pdf": "https://doi.org/10.1109/DICTA.2015.7371259"}, {"id": "05fa0ce63fb55a848d5ecd6cd1c45d3174dd889a", "title": "Making FLDA applicable to face recognition with one sample per person", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/05fa/0ce63fb55a848d5ecd6cd1c45d3174dd889a.pdf"}, {"id": "b2749caec0094e186d3ee850151c899b8508f47a", "title": "AVIUE — Artificial vision to improve the user experience", "year": 2013, "pdf": null}, {"id": "178a82e3a0541fa75c6a11350be5bded133a59fd", "title": "BioHDD: a dataset for studying biometric identification on heavily degraded data", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/178a/82e3a0541fa75c6a11350be5bded133a59fd.pdf"}, {"id": "df3d69acbaf1121798d5c7eddd8094fca3635466", "title": "Image reduction operators based on non-monotonic averaging functions", "year": 2013, "pdf": "https://doi.org/10.1109/FUZZ-IEEE.2013.6622458"}, {"id": "18eb8e376ea07f8ff68f7eaee16e34fcb0c4b23f", "title": "Boosting dense SIFT descriptors and shape contexts of face images for gender recognition", "year": 2010, "pdf": "https://doi.org/10.1109/CVPRW.2010.5543238"}, {"id": "6dd850acb928457ffd44e5d9dceb7946a7f0c6ee", "title": "Template-based matching using weight maps", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/6dd8/50acb928457ffd44e5d9dceb7946a7f0c6ee.pdf"}, {"id": "11bda1f054effb3116115b0699d74abec3e93a4b", "title": "Improving the performance of MDA by finding the best subspaces dimension based on LDA for face recognition", "year": 2011, "pdf": null}, {"id": "13425bb41d326982ec6b3c6f3034aa978a1300ac", "title": "Face Recognition for Smart Environments", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/1342/5bb41d326982ec6b3c6f3034aa978a1300ac.pdf"}, {"id": "05a35959c6822155bee927305e799613ce99cc2f", "title": "New valve and bonding designs for microfluidic biochips containing proteins.", "year": "2007", "pdf": "https://pdfs.semanticscholar.org/05a3/5959c6822155bee927305e799613ce99cc2f.pdf"}, {"id": "e0b6defafb69fa3bbaec279b3cba92edf50c760b", "title": "R-theta local neighborhood pattern for unconstrained facial image recognition and retrieval", "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-6846-z"}, {"id": "91a5897565818631a32ce4edae5548d2baf99d77", "title": "APPROACH TO RECOGNIZING FACES UNDER VARYING POSE GIVEN A SINGLE-VIEW", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/91a5/897565818631a32ce4edae5548d2baf99d77.pdf"}, {"id": "cae17fd058a031515ad0aa6c0f5da5fcb5f681b5", "title": "Towards Robust Evaluation of Face Morphing Detection", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8553018"}, {"id": "f0a0e963f1ddd8a0b3269392e3d67043d2ace7d0", "title": "Roweis-Saul Classifier for Machine Learning", "year": 2006, "pdf": 
"http://pdfs.semanticscholar.org/f0a0/e963f1ddd8a0b3269392e3d67043d2ace7d0.pdf"}, {"id": "16bd796687ca17ac7ca28d28d856b324186628ba", "title": "Face Recognition and Verification Using Photometric Stereo: The Photoface Database and a Comprehensive Evaluation", "year": 2013, "pdf": "https://doi.org/10.1109/TIFS.2012.2224109"}, {"id": "a2611b92b000d4a0d9918ff282c719dc7f82c600", "title": "Genetic-Based Type II Feature Extraction for Periocular Biometric Recognition: Less is More", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.59"}, {"id": "a46b950e1aa97ab3033d8a21fabb1952fb7eb5ce", "title": "Mixtures of boosted classifiers for frontal face detection", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/a46b/950e1aa97ab3033d8a21fabb1952fb7eb5ce.pdf"}, {"id": "93d74b1315a09f568027b6d8b3068ef048d17889", "title": "Pose-invariant features and personalized correspondence learning for face recognition", "year": 2017, "pdf": null}, {"id": "26ff4aaee89d0f87367ddfb466d8f8d308c22fed", "title": "Computer Vision - ACCV 2014 Workshops", "year": 2014, "pdf": "https://doi.org/10.1007/978-3-319-16631-5"}, {"id": "f5450d5b7c9352deaa1926feb240f644cf27bc23", "title": "Down-Sampling Face Images and Low-Resolution Face Recognition", "year": 2008, "pdf": null}, {"id": "214ac8196d8061981bef271b37a279526aab5024", "title": "Face Recognition Using Smoothed High-Dimensional Representation", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/214a/c8196d8061981bef271b37a279526aab5024.pdf"}, {"id": "234318640a05a7e30b84f1743f2b29d63ed109b7", "title": "Face recognition based on random feature", "year": 2015, "pdf": "https://doi.org/10.1109/VCIP.2015.7457869"}, {"id": "ae3a81e69ef3ffc22017cec5bb2c5ea26114ce2b", "title": "A weighted voting model of associative memory: theoretical analysis", "year": 2005, "pdf": "http://ece.eng.wayne.edu/~mhassoun/ijcnn_05_theoretical.pdf"}, {"id": "8c62ce3db6d4147bcc45cba0e225f87a30878810", "title": "Research Advances in Face Recognition", "year": 2009, "pdf": null}, {"id": "ad6bcf4384a7604b6252a6eeefade4c486b01240", "title": "Cluster-Based Distributed Face Tracking in Camera Networks", "year": 2010, "pdf": "https://engineering.purdue.edu/RVL/movies/Josiah/Yoder10Cluster.pdf"}, {"id": "c0d096c31198be908b73f6ebed99435b7d913e06", "title": "Gender recognition with uniform local binary patterns", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404587"}, {"id": "7c0a6824b556696ad7bdc6623d742687655852db", "title": "MPCA+MDA: A novel approach for face recognition based on tensor objects", "year": 2010, "pdf": "http://2010.telfor.rs/files/radovi//TELFOR2010_05_35.pdf"}, {"id": "34f60ecedeb798397849b171e2e8bcf46c9b7ada", "title": "An Efficient Face Recognition System based on the Combination of Pose Invariant and Illumination Factors", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/34f6/0ecedeb798397849b171e2e8bcf46c9b7ada.pdf"}, {"id": "51369f138c878e2bd17e6973f95328317868cbde", "title": "A novel approach for real time eye state detection in fatigue awareness system", "year": 2010, "pdf": null}, {"id": "1cdcccde3ad98329ffca275a2696c13b1d03ba9f", "title": "On using periocular biometric for gender classification in the wild", "year": "2016", "pdf": "http://doi.org/10.1016/j.patrec.2015.09.014"}, {"id": "7302d800c962b5cb705a269cda8525634cfe64a7", "title": "Decision Fusion for Multimodal Biometrics Using Social Network Analysis", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6851197"}, {"id": 
"47795cbf06a2cc40bf961caa600a491691692dfe", "title": "Embedded face recognition based on fast genetic algorithm for intelligent digital photography", "year": 2006, "pdf": "https://doi.org/10.1109/TCE.2006.1706463"}, {"id": "27e0684fa5b57715162ac6c58a6ea283c7db1719", "title": "Select eigenfaces for face recognition with one training sample per subject", "year": 2004, "pdf": "https://doi.org/10.1109/ICARCV.2004.1468857"}, {"id": "f41bc477a7a0ba36d11f82ec55131913f9ffe678", "title": "Random permutation Maxout transform for cancellable facial template protection", "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-5956-y"}, {"id": "ddeacdc30d32da02fd8313657926ef8a99e57620", "title": "Selection and fusion of facial features for face recognition", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/ddea/cdc30d32da02fd8313657926ef8a99e57620.pdf"}, {"id": "656a5d4d84c450792402b3c69eecbdbca4cad4cb", "title": "2.1. Imagenet and Related Datasets", "year": "", "pdf": "http://pdfs.semanticscholar.org/656a/5d4d84c450792402b3c69eecbdbca4cad4cb.pdf"}, {"id": "1434c9140ba724c9a92f478781e890434e7a0215", "title": "The role of global and feature based information in gender classification of faces: a comparison of human performance and computational models", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/6b3f/798dac21e41a7fc13dacf18093b2c6eb79e4.pdf"}, {"id": "548233d67f859491e50c5c343d7d77a7531d4221", "title": "Robust detection of outliers for projection-based face recognition methods", "year": 2007, "pdf": "https://doi.org/10.1007/s11042-007-0176-x"}, {"id": "91b1a59b9e0e7f4db0828bf36654b84ba53b0557", "title": "Simultaneous Hallucination and Recognition of Low-Resolution Faces Based on Singular Value Decomposition", "year": 2015, "pdf": "http://www.kresttechnology.com/krest-academic-projects/krest-mtech-projects/ECE/MTech%20DSP%202015-16/MTech%20DSP%20BasePaper%202015-16/50.pdf"}, {"id": "dc2c353fa43a75ec92327e323ba5715c400bf92b", "title": "Face Recognition in Hyperspectral Images", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/dc2c/353fa43a75ec92327e323ba5715c400bf92b.pdf"}, {"id": "62c425be3e07e076d1339d1f2fe2ed319a859f02", "title": "Face Recognition & Gender Determination Using LIPS", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/62c4/25be3e07e076d1339d1f2fe2ed319a859f02.pdf"}, {"id": "af54dd5da722e104740f9b6f261df9d4688a9712", "title": "Portability: A New Challenge on Designing Family Image Database", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/af54/dd5da722e104740f9b6f261df9d4688a9712.pdf"}, {"id": "ec90d333588421764dff55658a73bbd3ea3016d2", "title": "Protocol for Systematic Literature Review of Face Recognition in Uncontrolled Environment", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ec90/d333588421764dff55658a73bbd3ea3016d2.pdf"}, {"id": "00fb2836068042c19b5197d0999e8e93b920eb9c", "title": "Genetic Algorithm for Weight Optimization in Descriptor based Face Recognition Methods", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/00fb/2836068042c19b5197d0999e8e93b920eb9c.pdf"}, {"id": "8e1d84e08109b5c692f7eff5cbc1816e5bdb00a3", "title": "Adversarial Face Recognition and Phishing Detection Using Multi-Layer Data Fusion", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/9d67/61789aa33c2610c7ceebc4eea52a6c71c443.pdf"}, {"id": "3516d0c6918bcf977ad004b337fb066a9d5a8e17", "title": "A Novel Face Hallucination with an Error Regression Model and MPCA in RGB Color Space", "year": "", "pdf": 
"https://pdfs.semanticscholar.org/3516/d0c6918bcf977ad004b337fb066a9d5a8e17.pdf"}, {"id": "c6ecb8e20250e3fa8ef2a5edd8fad4131f3e874f", "title": "EigenBody : Analysis of body shape for gender from noisy images", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/c6ec/b8e20250e3fa8ef2a5edd8fad4131f3e874f.pdf"}, {"id": "49a7949fabcdf01bbae1c2eb38946ee99f491857", "title": "A concatenating framework of shortcut convolutional neural networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/49a7/949fabcdf01bbae1c2eb38946ee99f491857.pdf"}, {"id": "a6f858256d209c62e869af60a0f347f7cef07281", "title": "Learning sparse feature for eyeglasses problem in face recognition", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771437"}, {"id": "51173e0f31f362f3ea59ae3e98c5cdf31b2a2ec5", "title": "Face feature extraction and recognition based on discriminant subclass-center manifold preserving projection", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/5117/3e0f31f362f3ea59ae3e98c5cdf31b2a2ec5.pdf"}, {"id": "a6190a778a4e343ceb60bbd738df1cce094e7332", "title": "Robust lip feature detection in facial images", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393389"}, {"id": "a8385939ed0629d39d25053447f80a126627c307", "title": "Elucidating the neural correlates of related false memories using a systematic measure of perceptual relatedness", "year": "2017", "pdf": "https://www.sciencedirect.com/science/article/pii/S1053811916304670?dgcid=api_sd_search-api-endpoint"}, {"id": "912be40bda22038783af2bc0919fa181097c9f6f", "title": "Personal identification using periocular skin texture", "year": 2010, "pdf": "http://doi.acm.org/10.1145/1774088.1774408"}, {"id": "32fca37dcd496dc4306fb19be52f0988a742e4d9", "title": "Clustering-based discriminative locality alignment for face gender recognition", "year": 2012, "pdf": "https://doi.org/10.1109/IROS.2012.6385793"}, {"id": "d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0", "title": "Minimalistic CNN-based ensemble model for gender prediction from face images", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d0eb/3fd1b1750242f3bb39ce9ac27fc8cc7c5af0.pdf"}, {"id": "be66c2638b86964cbd67d66a8a6bc8346a06fad8", "title": "Face Recognition for Intelligent Robot Safety Verification System", "year": 2017, "pdf": null}, {"id": "3a3aff63229d6b6e5310985d2c7fc18808895b7b", "title": "Kernel linear regression for face recognition", "year": 2013, "pdf": "https://doi.org/10.1007/s00521-013-1435-6"}, {"id": "b21b860cf21edb2e57f62b665203e5d8b64829d5", "title": "Extended feed forward neural networks with random weights for face recognition", "year": 2014, "pdf": "https://doi.org/10.1016/j.neucom.2014.01.022"}, {"id": "418b106e1f072c4da400b516079f429d84cd7305", "title": "Model-based face computation", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/418b/106e1f072c4da400b516079f429d84cd7305.pdf"}, {"id": "6d838f3628dd3e8cdbc7da0b1b472c8c71eb0124", "title": "Multiple Neural Networks and Bayesian Belief Revision for a never-ending unsupervised learning", "year": 2010, "pdf": "https://doi.org/10.1109/ISDA.2010.5687229"}, {"id": "749146938bb9c953348e105acde9f261f15fb2cd", "title": "Learning the best subset of local features for face recognition", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/7491/46938bb9c953348e105acde9f261f15fb2cd.pdf"}, {"id": "367c571480ac46d48be050dee4e6103a0ebb5db5", "title": "Multimedia Content Based Image Retrieval Iii: Local Tetra Pattern", "year": 2014, "pdf": 
"http://pdfs.semanticscholar.org/367c/571480ac46d48be050dee4e6103a0ebb5db5.pdf"}, {"id": "c4330fabc238fa114f0b8e7b1c099e6565026cdf", "title": "Adaptive Context-Aware Filter Fusion for Face Recognition on Bad Illumination", "year": 2006, "pdf": "https://doi.org/10.1007/11892960_65"}, {"id": "83b79e4a0be3db276b12ea7164cded4b755cb215", "title": "Fusion of face and iris biometrics using local and global feature extraction methods", "year": 2014, "pdf": "https://doi.org/10.1007/s11760-012-0411-4"}, {"id": "214e544d3b01ddbc24e3093bbf186fdecc600342", "title": "Automated conformance testing for ISO/IEC 19794-5 Standard on facial photo specifications", "year": 2013, "pdf": "https://doi.org/10.1504/IJBM.2013.050734"}, {"id": "5fac23560b5ba4d45045f1edd837ed4bb6c1f5ca", "title": "Optimization of a Face Verification System using Bayesian Screening Techniques", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/5fac/23560b5ba4d45045f1edd837ed4bb6c1f5ca.pdf"}, {"id": "471befc1b5167fcfbf5280aa7f908eff0489c72b", "title": "Class-Specific Kernel-Discriminant Analysis for Face Verification", "year": 2007, "pdf": "https://doi.org/10.1109/TIFS.2007.902915"}, {"id": "fb2ac6befd22fde3e6cff180d3431f02c81bc32a", "title": "Genetic optimisation of illumination compensation methods in cascade for face recognition", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/fb2a/c6befd22fde3e6cff180d3431f02c81bc32a.pdf"}, {"id": "6d91da37627c05150cb40cac323ca12a91965759", "title": "Gender Politics in the 2016 U.S. Presidential Election: A Computer Vision Approach", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf"}, {"id": "f09c9adff8f71c1be5ebbb1bb01d4d762aa385e9", "title": "A gender classification system robust to occlusion using Gabor features based (2D)2PCA", "year": 2014, "pdf": "https://doi.org/10.1016/j.jvcir.2014.03.009"}, {"id": "7fc5ab3743e6e9a2f4fe70152440e13a673e239b", "title": "Improved Face Recognition Rate Using HOG Features and SVM Classifier", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/7fc5/ab3743e6e9a2f4fe70152440e13a673e239b.pdf"}, {"id": "e74c4cf90c5bbb88a8ae77aaa5709984f7e6a80f", "title": "Viewpoint Unconstrained Face Recognition Based on Affine Local Descriptors and Probabilistic Similarity", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/e74c/4cf90c5bbb88a8ae77aaa5709984f7e6a80f.pdf"}, {"id": "5ab4d05729884f5ce806ed77f38b3972f3b430f6", "title": "Cost-sensitive subspace learning for face recognition", "year": 2010, "pdf": "http://vipl.ict.ac.cn/homepage/ACCV14Metric/ref/18_Cost-Sensitive%20Subspace%20Learning%20for%20Face%20Recognition_CVPR2010.pdf"}, {"id": "cbefe7939bc7f44274ebbc161c14054b55db9af2", "title": "An Extended Local Binary Pattern for Gender Classification", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6746813"}, {"id": "d6255a0db6f8f157c5c901d758c7a5f36416ab51", "title": "Face Recognition Using Gabor Wavelet Transform", "year": "2001", "pdf": "https://pdfs.semanticscholar.org/d625/5a0db6f8f157c5c901d758c7a5f36416ab51.pdf"}, {"id": "af80dfe0dacf992c49543a4bac53d1974cb70e2e", "title": "When Holistic Processing is Not Enough: Local Features Save the Day", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/af80/dfe0dacf992c49543a4bac53d1974cb70e2e.pdf"}, {"id": "197598af5be60fc75535c2f90849e60ac7122871", "title": "Fast multi-view face tracking with pose estimation", "year": 2008, "pdf": "http://ieeexplore.ieee.org/document/7080569/"}, {"id": "e4afc03c818bc9e357ea2fb23ebf73496f1ffc81", 
"title": "Mafalda Libo\u0301rio Baio Morais Alves", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e4af/c03c818bc9e357ea2fb23ebf73496f1ffc81.pdf"}, {"id": "da61e3f62eda5e1cea027f73a156da36262722b0", "title": "Un nouvel ensemble de descripteurs de Fourier Clifford pour les images couleur. Les GCFD3", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/fcb6/f1bac7b3c909ba6ae3a39e4ea097564064ec.pdf"}, {"id": "6c964e59bdac6b8044993ca96b47a9a0addedfb8", "title": "First Impressions: A Survey on Computer Vision-Based Apparent Personality Trait Analysis", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.08046.pdf"}, {"id": "9c5c30e396b307010c374ef3730e84f6cf763205", "title": "Recent advances in visual and infrared face recognition - a review", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/9c5c/30e396b307010c374ef3730e84f6cf763205.pdf"}, {"id": "80c4f5bc43f21041343c6d7a61cdc281cb36be07", "title": "Information routing, correspondence finding, and object recognition in the brain", "year": "2008", "pdf": "https://pdfs.semanticscholar.org/595b/e23477495891ec07fd5f5cd48b7fee5d7fe6.pdf"}, {"id": "beaac021fa41904ad8247936bccdc7ea17ee9f53", "title": "Alignment of face images based on SIFT feature", "year": 2014, "pdf": "https://doi.org/10.1109/ICMLC.2014.7009675"}, {"id": "085913122bb75391ea8c73f2625f4679fe9cb3b1", "title": "Vector array based Multi-View Face Detection with compound exemplars", "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248053"}, {"id": "8ec7fff88b2e5b49154e6654e5e27f6678ddb7f0", "title": "On identification from periocular region utilizing SIFT and SURF", "year": 2014, "pdf": "http://ieeexplore.ieee.org/document/6952498/"}, {"id": "7a9ef491914d515bb5570aa9b3a261d42d430b86", "title": "Excuse Me, Do I Know You From Somewhere? 
Unaware Facial Recognition Using Brain-Computer Interfaces", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7a9e/f491914d515bb5570aa9b3a261d42d430b86.pdf"}, {"id": "79033ec1b2c86034908febd444d6ed3c753e17b3", "title": "Face Recognition via Globality-Locality Preserving Projections", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/7903/3ec1b2c86034908febd444d6ed3c753e17b3.pdf"}, {"id": "076c97826df63f70d55ea11f0b7ae47a7ad81ad3", "title": "A Robust Face Recognition Algorithm Using Markov Stationary Features and Adjacent Pixel Intensity Difference Quantization Histogram", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SITIS.2011.40"}, {"id": "8028b9bc85b64f3b93d936ab2bd05ccf64a92bc3", "title": "Enhancement of face-sensitive ERPs in older adults induced by face recognition training", "year": "2018", "pdf": "https://www.sciencedirect.com/science/article/pii/S0028393218304846?dgcid=api_sd_search-api-endpoint"}, {"id": "2c53cb4222cd9ccc868a07d494b8a4ce102658fa", "title": "Face recognition across pose: A review", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/2c53/cb4222cd9ccc868a07d494b8a4ce102658fa.pdf"}, {"id": "bf4073f334902cd76608655b2d54a624ccdbface", "title": "Data extraction for user profile management based on behavior", "year": 2015, "pdf": null}, {"id": "b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1", "title": "Local Directional Relation Pattern for Unconstrained and Robust Face Retrieval", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b144/4b3bf15eec84f6d9a2ade7989bb980ea7bd1.pdf"}, {"id": "30457461333c8797457c18636732327e6dde1d04", "title": "Gender classification system for half face images using multi manifold discriminant analysis", "year": 2017, "pdf": null}, {"id": "3fbd68d1268922ee50c92b28bd23ca6669ff87e5", "title": "A shape- and texture-based enhanced Fisher classifier for face recognition", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/f563/6a8021c09870c350e7505c87625fe1681bd4.pdf"}, {"id": "f77563386ac293620ce2b90b5d7250ab5d8f9f50", "title": "Regression-based Hypergraph Learning for Image Clustering and Classification", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f775/63386ac293620ce2b90b5d7250ab5d8f9f50.pdf"}, {"id": "d6dab84451254d7fbb5b9e1d40a7d2a92dec13b3", "title": "Enhanced Local Binary Patterns for Automatic Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d6da/b84451254d7fbb5b9e1d40a7d2a92dec13b3.pdf"}, {"id": "434300db45ea1b0b36b0c606ca728aaaed235275", "title": "The Effect of Distinctiveness in Recognizing Average Face: Human Recognition and Eigenface Based Machine Recognition", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/4343/00db45ea1b0b36b0c606ca728aaaed235275.pdf"}, {"id": "cba987b9d2da056c23e052705f0eddedb12b086d", "title": "Facial feature representation with directional ternary pattern (DTP): Application to gender classification", "year": 2012, "pdf": "https://doi.org/10.1109/IRI.2012.6303005"}, {"id": "58493390be8aeeabbd974e3d81f7db49af1ceed1", "title": "Real-time eye detection using face-circle fitting and dark-pixel filtering", "year": "2004", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1394426"}, {"id": "3b1f8bf9e41b29d38968144ae9b2ae13a9bde731", "title": "Recognition invariance obtained by extended and invariant features", "year": 2004, "pdf": "https://doi.org/10.1016/j.neunet.2004.01.006"}, {"id": "3abe50d0a806a9f5a5626f60f590632a6d87f0c4", "title": "Estimating pose and illumination direction for frontal face synthesis", "year": 2008, "pdf": 
"http://vis.uky.edu/~gravity/publications/2008/Estimating_Xinyu.pdf"}, {"id": "61084a25ebe736e8f6d7a6e53b2c20d9723c4608", "title": "Face recognition for web-scale datasets", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6108/4a25ebe736e8f6d7a6e53b2c20d9723c4608.pdf"}, {"id": "05f2c7cdaa237e42338e2f626af1fe79fcc4bf21", "title": "Optimized selection of benchmark test parameters for image watermark algorithms based on Taguchi methods and corresponding influence on design decisions for real-world applications", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/05f2/c7cdaa237e42338e2f626af1fe79fcc4bf21.pdf"}, {"id": "449f93b4be37087236c6a13e9db4c1c323683a58", "title": "Abnormalities in early visual processes are linked to hypersociability and atypical evaluation of facial trustworthiness: An ERP study with Williams syndrome", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/449f/93b4be37087236c6a13e9db4c1c323683a58.pdf"}, {"id": "0328b975b11fd4153010535d1a06cf618f9b0c91", "title": "Evaluating the Complexity of Databases for Person Identification and Verification Evaluating the Complexity of Databases for Person Identification and Verification", "year": 1999, "pdf": "http://pdfs.semanticscholar.org/0328/b975b11fd4153010535d1a06cf618f9b0c91.pdf"}, {"id": "1fee632cad6a3853cc43621ba50161f5dd6263e5", "title": "Gated classifiers: Boosting under high intra-class variation", "year": 2011, "pdf": "http://www.csc.kth.se/~osda02/files/danielssonCVPR11poster.pdf"}, {"id": "af6b14c0bf427b22baffc06a0bd515b30649d8c3", "title": "Face recognition using kernel scatter-difference-based discriminant analysis", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/af6b/14c0bf427b22baffc06a0bd515b30649d8c3.pdf"}, {"id": "7eed1dbf1d09dd48f75ae25c5e39182afae6862f", "title": "A New Performance Evaluation Method for Face Identification - Regression Analysis of Misidentification Risk", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383276"}, {"id": "cf254aaecb3b6f0f127a8ed620c11223bd171ccc", "title": "Boosting gender recognition performance with a fuzzy inference system", "year": "2015", "pdf": "http://doi.org/10.1016/j.eswa.2014.11.023"}, {"id": "4897bb843f0f397d150f4c210a3bd9990db881e3", "title": "Face recognition based on an improved center symmetric local binary pattern", "year": "2017", "pdf": "http://doi.org/10.1007/s00521-017-2963-2"}, {"id": "97e5beb344cc68fbfe19a34e061f886c962c81e4", "title": "Representing and Learning High Dimensional Data with the Optimal Transport Map from a Probabilistic Viewpoint", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8578918"}, {"id": "09718bf335b926907ded5cb4c94784fd20e5ccd8", "title": "Recognizing partially occluded, expression variant faces from single training image per person with SOM and soft k-NN ensemble", "year": 2005, "pdf": "https://doi.org/10.1109/TNN.2005.849817"}, {"id": "e9d426570413ba96abe769e8fc305d7f0cca6723", "title": "Bayesian face recognition and perceptual narrowing in face-space.", "year": "2012", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/22709406"}, {"id": "7bfc38e15589f9ffb471e152297f5419fdc9ba8d", "title": "Gender classification using M-estimator based radial basis function neural network", "year": 2014, "pdf": "http://ieeexplore.ieee.org/document/7514523/"}, {"id": "98960be5ae51d30118f091f7091299a49f2f34bb", "title": "Global and Feature Based Gender Classification of Faces: a Comparison of Human Performance and Computational Models", "year": 2004, "pdf": 
"http://pdfs.semanticscholar.org/9896/0be5ae51d30118f091f7091299a49f2f34bb.pdf"}, {"id": "2fe5bea76853da079512595fb8bdd74ef4ccf12f", "title": "Face recognition using two-dimensional nonnegative principal component analysis", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2fe5/bea76853da079512595fb8bdd74ef4ccf12f.pdf"}, {"id": "13a82da2bfa24583caf78ab1d14b5cfa4798b3b3", "title": "Robust face hallucination using quantization-adaptive dictionaries", "year": 2016, "pdf": "https://www.um.edu.mt/__data/assets/file/0006/289446/icip_2016.pdf"}, {"id": "cd841bdd90447085542fa3a6a422437e97edddc8", "title": "Preserving spatial information and overcoming variations in appearance for face recognition", "year": 2010, "pdf": "https://doi.org/10.1007/s10044-010-0188-4"}, {"id": "9755554b13103df634f9b1ef50a147dd02eab02f", "title": "How Transferable Are CNN-Based Features for Age and Gender Classification?", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736925"}, {"id": "60cdd2ae71d39f2a8a3c6d4c22284a602428b347", "title": "Image of face captured Face Detection and localization Feature extraction Learning Classification Decision", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/60cd/d2ae71d39f2a8a3c6d4c22284a602428b347.pdf"}, {"id": "0098f42c6f24d5e6d2471c1d7d1e864ff8e83226", "title": "Review on Face Recognition across Age Progression", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0098/f42c6f24d5e6d2471c1d7d1e864ff8e83226.pdf"}, {"id": "591db404f6d8e3f37c12752b0f9173ff1a475598", "title": "Eigentransformation-based face super-resolution in the wavelet domain", "year": 2012, "pdf": "https://doi.org/10.1016/j.patrec.2011.12.001"}, {"id": "294eef6848403520016bb2c93bfb71b3c75c73fa", "title": "Extension of Robust Principal Component Analysis for Incremental Face Recognition", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/294e/ef6848403520016bb2c93bfb71b3c75c73fa.pdf"}, {"id": "0756efe121e37479157010e18723e0c8da02a34b", "title": "Face authentication in encrypted domain based on correlation filters", "year": 2017, "pdf": null}, {"id": "4da80ad59adbe530838b5685935b488edab07c7d", "title": "Effective Proximity Retrieval by Ordering Permutations", "year": 2008, "pdf": "http://www.dcc.uchile.cl/~gnavarro/algoritmos/ps/tpami07.pdf"}, {"id": "7aa83aee1e8b2da7ec90c67e63161c24e85f4ba1", "title": "Face image classification by pooling raw features", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/7aa8/3aee1e8b2da7ec90c67e63161c24e85f4ba1.pdf"}, {"id": "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "title": "Do They All Look the Same? 
Deciphering Chinese, Japanese and Koreans by Fine-Grained Deep Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf"}, {"id": "2f8cf3747f3c7d8e230ad9ab3dbc5ea4e6b9cdf1", "title": "NIMBLE: a kernel density model of saccade-based visual memory.", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/c9c7/cbbd4fcf81aafd9877043f96b88411c30cb1.pdf"}, {"id": "f07468f7b23a4b5740787a7d9d2c7706991b51ed", "title": "A Continuos Learning for a Face Recognition System", "year": "2011", "pdf": null}, {"id": "c45183ec95f89aff793a2629a0520006b4153d6a", "title": "Entropy-based template analysis in face biometric identification systems", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c451/83ec95f89aff793a2629a0520006b4153d6a.pdf"}, {"id": "20f64a00593ae2c6ebbe87b41394335152f9b165", "title": "A Hybrid of Principal Component Analysis and Partial Least Squares for Face Recognition across Pose", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/20f6/4a00593ae2c6ebbe87b41394335152f9b165.pdf"}, {"id": "ccb54fc5f263a8bc2a8373839cb6855f528f10d3", "title": "A realistic virtual environment for evaluating face analysis systems under dynamic conditions", "year": "2016", "pdf": "http://doi.org/10.1016/j.patcog.2015.11.008"}, {"id": "e98022f21f7d31a056e027bd1d499a5285ed52f7", "title": "Face recognition under partial occlusion and noise", "year": 2013, "pdf": null}, {"id": "bad65ef11c58c3660febb98abbe9f86e2b8fce11", "title": "Object Localization Based on Mutual Information in Global Structure Constraint Model", "year": 2008, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CISIM.2008.9"}, {"id": "510257c8cedc079aae88ccbf3dd10a84051c8fdc", "title": "Fast Facial Image Super-Resolution via Local Linear Transformations for Resource-Limited Applications", "year": 2011, "pdf": "https://doi.org/10.1109/TCSVT.2011.2163461"}, {"id": "27da432cf2b9129dce256e5bf7f2f18953eef5a5", "title": "Face Recognition in Low Quality Images: A Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11519.pdf"}, {"id": "bcf52629788e210f9c87945fa9cf792609e9154a", "title": "Facial Asymmetry Quantification for Expression Invariant Human Identification", "year": "", "pdf": "http://pdfs.semanticscholar.org/bcf5/2629788e210f9c87945fa9cf792609e9154a.pdf"}, {"id": "a66d99ea07f72281a6e510eb2a9fd4b0973e16d0", "title": "Object Localization Based on Global Structure Constraint Model and Optimal Algorithm", "year": 2008, "pdf": null}, {"id": "68d7be102d8b298d172d120d5a7ae1bfd5b8ffb9", "title": "(2D)2PCA: Two-directional two-dimensional PCA for efficient face representation and recognition", "year": 2005, "pdf": "https://doi.org/10.1016/j.neucom.2005.06.004"}, {"id": "15dea987f66386be14b7811f1f27784f3ed9e9c0", "title": "Face Detection with Mixtures of Boosted Discriminant Features", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/15de/a987f66386be14b7811f1f27784f3ed9e9c0.pdf"}, {"id": "9401465784cb5107cabb87d7c2ffc0a594e1dafe", "title": "Generalized YAST algorithm for signal subspace tracking", "year": 2015, "pdf": "https://doi.org/10.1016/j.sigpro.2015.04.025"}, {"id": "0ba67a76c6e7d37d3fdc0017cd40a68e503168f1", "title": "Improve recognition performance by hybridizing principal component analysis (PCA) and elastic bunch graph matching (EBGM)", "year": 2014, "pdf": "https://doi.org/10.1109/CIMSIVP.2014.7013270"}, {"id": "7d0cb85f9f63afc23ce42b92337b12ef91fc091e", "title": "Discriminative transfer learning for single-sample face recognition", "year": 2015, "pdf": 
"https://pdfs.semanticscholar.org/7d0c/b85f9f63afc23ce42b92337b12ef91fc091e.pdf"}, {"id": "ac2e3a889fc46ca72f9a2cdedbdd6f3d4e9e2627", "title": "Age detection from a single image using multitask neural networks : An overview and design proposal", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/ac2e/3a889fc46ca72f9a2cdedbdd6f3d4e9e2627.pdf"}, {"id": "15df73918e084a146cd215b839a3eec1cc813a78", "title": "Projection Peak Analysis for Rapid Eye Localization", "year": "2009", "pdf": "https://pdfs.semanticscholar.org/15df/73918e084a146cd215b839a3eec1cc813a78.pdf"}, {"id": "a044dc0ff3fa2a8f0a80afff9eb2cbc762996e3b", "title": "An Efficient Face Recognition System Based On the Hybridization of Pose Invariant and Illumination Process", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/a044/dc0ff3fa2a8f0a80afff9eb2cbc762996e3b.pdf"}, {"id": "aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5", "title": "Biometrics in ambient intelligence", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/aaa4/c625f5f9b65c7f3df5c7bfe8a6595d0195a5.pdf"}, {"id": "d045f43bd9cbd5ad4833be011e3a33db55d1f7ce", "title": "Sparse factorial code representation using independent component analysis for face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-5542-8"}, {"id": "9019ad9140383a406b8d87a2dc9a99ab1d16aabb", "title": "Effects of the facial and racial features on gender classification", "year": 2010, "pdf": "http://www.researchgate.net/profile/Ece_Gunes/publication/266850463_Effects_of_the_Facial_and_Racial_Features_on_Gender_Classification/links/548e85ca0cf214269f243ef7.pdf"}, {"id": "60c36bfa7881435e2111fe3e522a36880dee6d09", "title": "Study of the Changing Trends in Facial Expression Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/60c3/6bfa7881435e2111fe3e522a36880dee6d09.pdf"}, {"id": "4688787d064e59023a304f7c9af950d192ddd33e", "title": "Investigating the Discriminative Power of Keystroke Sound", "year": 2015, "pdf": "http://www.cse.msu.edu/~liuxm/publication/Roth_Liu_Ross_Metaxas_TIFS.pdf"}, {"id": "d84d48f842d271fe3e02da1a55d8137da5d4b756", "title": "Feature Extraction and Dimensionality Reduction Using Radon and Fourier Transforms with Application to Face Recognition", "year": 2007, "pdf": null}, {"id": "a2b24511ab903adb0529fe36d043dde64f15abc6", "title": "Soft-biometric detection based on supervised learning", "year": 2014, "pdf": "https://doi.org/10.1109/ICARCV.2014.7064310"}, {"id": "4707175ebc50e4036412f441a7cec6673c4ad31f", "title": "Analysis and Comparison of Eigenspace-Based Face Recognition Approaches", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/71f6/49ba0e46684a9f0c223d65c28b61bed38297.pdf"}, {"id": "b6c0397fc911e12519655ec1eb1074574ae6f290", "title": "Feature Subspace Estimation for Resolving Dimensionality Mismatch in Practical Color-Based Face Recognition Systems", "year": 2016, "pdf": "https://doi.org/10.1142/S0218001416560048"}, {"id": "a9f15f6a6d94e5fa421e592ce1e577d1966af954", "title": "Interest filter vs. 
interest operator: Face recognition using Fisher linear discriminant based on interest filter representation", "year": "2008", "pdf": "http://doi.org/10.1016/j.patrec.2008.05.023"}, {"id": "7c5c4ed47bccd0016d53b7bbc27a41dd74bebf1e", "title": "Adaptive generic learning for face recognition from a single sample per person", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539990"}, {"id": "05d7812269e8a1c229e45665efdf88eeae605dfc", "title": "Centralized Gabor gradient histogram for facial gender recognition", "year": 2010, "pdf": "https://doi.org/10.1109/ICNC.2010.5584287"}, {"id": "5828c50ddde3b819403ca9c447f65d0a2accaec8", "title": "Recognizing Face or Object from a Single Image: Linear vs. Kernel Methods on 2D Patterns", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/5828/c50ddde3b819403ca9c447f65d0a2accaec8.pdf"}, {"id": "843f873c08df64431baefd79e83e4b70236427de", "title": "Exploring and Understanding the High Dimensional and Sparse Image Face Space: a Self-Organized Manifold Mapping", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/843f/873c08df64431baefd79e83e4b70236427de.pdf"}, {"id": "ed5fdbecb5f655333b3e24d448a9665d18e6b5cd", "title": "Effects of aging on the neural correlates of successful item and source memory encoding.", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/ed5f/dbecb5f655333b3e24d448a9665d18e6b5cd.pdf"}, {"id": "dcdedd9f4f3ba370903c2d0c976f82fd63da6266", "title": "Face recognition using discrete wavelet graph", "year": 2003, "pdf": "https://doi.org/10.1109/ICASSP.2003.1201632"}, {"id": "422ca72f0ce9d63d2fab1ecc7f3c77e5c0fbfbff", "title": "MEG: Texture operators for multi-expert gender classification", "year": 2017, "pdf": "https://doi.org/10.1016/j.cviu.2016.09.004"}, {"id": "c7aff56c47c9b26691b420ecfc72caf4901ef606", "title": "Face recognition by combining Gabor wavelets and nearest neighbor discriminant analysis", "year": 2008, "pdf": null}, {"id": "7ed4d134e1910ded71697aa7420f2fb720596d4b", "title": "PCCA: A new approach for distance learning from sparse pairwise constraints", "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6247987"}, {"id": "6554ca3187b3cbe5d1221592eb546dfc11aac14b", "title": "Hybrid method based on topography for robust detection of iris center and eye corners", "year": 2013, "pdf": "http://doi.acm.org/10.1145/2501643.2501647"}, {"id": "311848708312ccceaaeb82bb7dce31db5e63a7ed", "title": "Spatial But Not Oculomotor Information Biases Perceptual Memory: Evidence From Face Perception and Cognitive Modeling", "year": 2017, "pdf": "https://doi.org/10.1111/cogs.12437"}, {"id": "3528125426646dfb7b9ec2bfb937e1e70028bcbd", "title": "An Improved Kernel Minimum Square Error Classification Algorithm Based on $L_{2,1}$ -Norm Regularization", "year": 2017, "pdf": "https://doi.org/10.1109/ACCESS.2017.2730218"}, {"id": "ccc874b0d73690d18a84229f40af438696fb1b27", "title": "Agent-based active-vision system reconfiguration for autonomous surveillance of dynamic, multi-object environments", "year": 2005, "pdf": "https://doi.org/10.1109/IROS.2005.1545043"}, {"id": "1176a74fb9351ac2de81c198c4861d78e58f172d", "title": "Homomorphic filtering based illumination normalization method for face recognition", "year": 2011, "pdf": "https://doi.org/10.1016/j.patrec.2011.03.023"}, {"id": "e7796cb863c1e25b393ef05241e222a6355bd4e4", "title": "Conformation-Based Hidden Markov Models: Application to Human Face Identification", "year": 2010, "pdf": "https://doi.org/10.1109/TNN.2009.2039875"}, {"id": 
"96813c14f3de1b8f7472caff7a99b4145267f25c", "title": "Argus: the digital doorman", "year": 2001, "pdf": "http://www-2.cs.cmu.edu/~rahuls/pub/ieee-is-rahuls.pdf"}, {"id": "a36f79f541a4049b615c8e12531a6e5ab88d1aae", "title": "Nonlinear Dynamic Projection for Noise Reduction of Dispersed Manifolds", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2318727"}, {"id": "23ee98fece06ec19bbdc399f1ef5412265275f9c", "title": "Ongoing Challenges in Face Recognition", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/23ee/98fece06ec19bbdc399f1ef5412265275f9c.pdf"}, {"id": "bd3a3884718015cde3eb8b0fdeae94eb1702a233", "title": "A Hierarchical Compositional Model for Representation and Sketching of High-Resolution Human Images", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/bd3a/3884718015cde3eb8b0fdeae94eb1702a233.pdf"}, {"id": "e533d2321ccdd69f64be60ce1e503c09eb3dfed0", "title": "Enhanced face recognition using Cross Local Radon Binary Patterns", "year": 2015, "pdf": "https://doi.org/10.1109/ICCE.2015.7066492"}, {"id": "a6f13b609450139d09a97d0f18033cc45f5701c9", "title": "Unsupervised 2D Dimensionality Reduction with Adaptive Structure Learning", "year": "2017", "pdf": "http://doi.org/10.1162/NECO_a_00950"}, {"id": "d54703c366bce363130f1e633e033a0116c8a0da", "title": "Review on Emotion Recognition Databases", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d547/03c366bce363130f1e633e033a0116c8a0da.pdf"}, {"id": "132cc530230cb869318b0a9d81a717077895db38", "title": "Emancipation of Upper Bound Greedy Algorithm in Detection of Nodes in Social Networks", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/132c/c530230cb869318b0a9d81a717077895db38.pdf"}, {"id": "b55058e3a67677206f0b0816abf980ecebbfef86", "title": "Optimized features selection for gender classification using optimization algorithms", "year": "2013", "pdf": "http://doi.org/10.3906/elk-1203-51"}, {"id": "dae35f1f2c581d9e632cccb8d279b56a4f1deb79", "title": "Contribution to the Fusion of Biometric Modalities by the Choquet Integral", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/dae3/5f1f2c581d9e632cccb8d279b56a4f1deb79.pdf"}, {"id": "1d3733e1a4b9d3e5b95fb7cc135a0819cba90c22", "title": "Linear subspace methods in face recognition", "year": "2011", "pdf": "http://ethos.bl.uk/OrderDetails.do?uin=uk.bl.ethos.548966"}, {"id": "2168ec12eff5c3d1ff09d0f3c13d6df5b5061164", "title": "Face recognition with salient local gradient orientation binary patterns", "year": 2009, "pdf": "https://doi.org/10.1109/ICIP.2009.5413904"}, {"id": "8e16ca7a91aea52788f1ed5ce12c906117a56cb4", "title": "Face hallucination based on sparse local-pixel structure", "year": 2014, "pdf": "https://doi.org/10.1016/j.patcog.2013.09.012"}, {"id": "ffeff854e7fcf5af663497be00c86537f7d9ed11", "title": "Face recognition in JPEG compressed domain: a novel coefficient selection approach", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ffef/f854e7fcf5af663497be00c86537f7d9ed11.pdf"}, {"id": "62857147a6809063671614d62f43605130757b1c", "title": "Unsupervised Discriminant Projection Analysis for Feature Extraction", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/6285/7147a6809063671614d62f43605130757b1c.pdf"}, {"id": "c20ac2441e6ec29ae926d3c5605b71ce10ef6dff", "title": "Heterogeneous image transformation", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c20a/c2441e6ec29ae926d3c5605b71ce10ef6dff.pdf"}, {"id": "ff60f453888ac00a0e0b6f1649c1a699515a955f", "title": "Fast and Accurate Face Recognition Using Support 
Vector Machines", "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2005.578"}, {"id": "47d32615dc396dece539b664969d2e102f5b0f63", "title": "Multibiometric system using fuzzy level set, and genetic and evolutionary feature extraction", "year": "2015", "pdf": "http://doi.org/10.1049/iet-bmt.2014.0064"}, {"id": "ff946df1cea6c107b2c336419c34ea69cc3ddbc4", "title": "EGA — Ethnicity, gender and age, a pre-annotated face database", "year": 2012, "pdf": null}, {"id": "9a5e36c0f9b802cd346e3225c0b04ab9dda496d1", "title": "Evolutionary Classifier Fusion for Optimizing Face Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FBIT.2007.50"}, {"id": "96d3280d7af05eb1968aed07cd50c305952cf83b", "title": "Linear Discriminant Analysis Based on L1-Norm Maximization", "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2013.2253476"}, {"id": "282e2f52d8dcac36c8c1a35cdee66da52a07e2ea", "title": "Sparse representation classification based on difference subspace", "year": 2016, "pdf": "https://doi.org/10.1109/CEC.2016.7744329"}, {"id": "82a610a59c210ff77cfdde7fd10c98067bd142da", "title": "Human attention and intent analysis using robust visual cues in a Bayesian framework", "year": "2006", "pdf": "https://pdfs.semanticscholar.org/82a6/10a59c210ff77cfdde7fd10c98067bd142da.pdf"}, {"id": "e8a5800db4b7609e3a55ec4b904b263cd359df2e", "title": "Face Recognition using Neural Network and Eigenvalues with Distinct Block Processing", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/e8a5/800db4b7609e3a55ec4b904b263cd359df2e.pdf"}, {"id": "144523508d89f164c61924a4c4fa6946fb5545f5", "title": "A Comparison of Gabor Filter Methods for Automatic Detection of Facial Landmarks", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/1445/23508d89f164c61924a4c4fa6946fb5545f5.pdf"}, {"id": "8009291e2f52d8bc586b5e0be9aa1b91ce12c711", "title": "A Configurable Multibiometric System for Authentication at Different Security Levels Using Mobile Devices", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SRDSW.2016.14"}, {"id": "f59ccad6b7f76bd8cd5d6bbf867b2236c9b01e02", "title": "Comparison of three face recognition algorithms", "year": 2012, "pdf": null}, {"id": "a0b2df8f72ff672cb0760c5221657a5f48f0ec5d", "title": "Searching Image Databases Using Appearance Models", "year": "", "pdf": "http://pdfs.semanticscholar.org/a0b2/df8f72ff672cb0760c5221657a5f48f0ec5d.pdf"}, {"id": "862234d7e26aed33cd5287be256cd9ec23b0b489", "title": "Detection of eyes by circular Hough transform and histogram of gradient", "year": "2012", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460500"}, {"id": "020f5848c11b43ca5f7cf02f9198649895422426", "title": "Real-time face alignment with tracking in video", "year": 2008, "pdf": "http://media.cs.tsinghua.edu.cn/~imagevision/papers/icip08-1082.pdf"}, {"id": "4cfae149d6acd8cffc12c06ed796f1f84dce0e73", "title": "Face Recognition Based on Image Latent Semantic Analysis Model and SVM", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/4cfa/e149d6acd8cffc12c06ed796f1f84dce0e73.pdf"}, {"id": "2713423d87d011c0a5aae99bef57523769121a1d", "title": "A Codebook Design Method for Robust VQ-Based Face Recognition Algorithm", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/2713/423d87d011c0a5aae99bef57523769121a1d.pdf"}, {"id": "42afe5fd3f7b1d286a20e9306c6bc8624265f658", "title": "Face Detection Using the 3\u00d73 Block Rank Patterns of Gradient Magnitude Images", "year": 2013, "pdf": 
"http://pdfs.semanticscholar.org/42af/e5fd3f7b1d286a20e9306c6bc8624265f658.pdf"}, {"id": "4cbf873d3d359200267a3bc33d45c442061f6989", "title": "Robust FFT-Based Scale-Invariant Image Registration with Image Gradients", "year": 2010, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tpami_2010.pdf"}, {"id": "f7c155f9bfb55ecd7518feb7cc8854aee36b43ed", "title": "Multimanifold analysis with adaptive neighborhood in DCT domain for face recognition using single sample per person", "year": 2014, "pdf": null}, {"id": "a82b47e94a8982b9c1ca4bfab54ddeaf856d0be3", "title": "Gender Classification Using Local Directional Pattern (LDP)", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.373"}, {"id": "2df6aa6dd2683fb35f7dcd8536d7b67fb72ede12", "title": "What drives social in-group biases in face recognition memory? ERP evidence from the own-gender bias.", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2df6/aa6dd2683fb35f7dcd8536d7b67fb72ede12.pdf"}, {"id": "27c59f06bc62ea85e828228c1bb359e06f9232d8", "title": "Face detectionwith boostedGaussian features", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/27c5/9f06bc62ea85e828228c1bb359e06f9232d8.pdf"}, {"id": "d74e708a058f0590c7a2ad679af483dad46acac1", "title": "Multimodal gender detection", "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3136770"}, {"id": "4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c", "title": "Face Recognition using Local Features based on Two-layer Block Model", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/4a1a/5316e85528f4ff7a5f76699dfa8c70f6cc5c.pdf"}, {"id": "4ccae36f78c660c9fc01959e28e676142c5f8a7d", "title": "KCCA-based technique for profile face identification", "year": "2017", "pdf": "http://doi.org/10.1186/s13640-016-0123-8"}, {"id": "4344413b7814b2ba99cc79ead2903f259e98ed4b", "title": "Modelling Uncertainty in Representation of Facial Features for Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/dc27/1b25f54e13cc38a7d11bab8f3fd6a66dfe0a.pdf"}, {"id": "fec295c6b6a1795d8ccb4592603040794667dfa7", "title": "LDOP: Local Directional Order Pattern for Robust Face Retrieval", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.07441.pdf"}, {"id": "d6f49b63e4e285ff2bb3ba92e1e10287d407d6c0", "title": "Tasks determine what is learned in visual statistical learning.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d6f4/9b63e4e285ff2bb3ba92e1e10287d407d6c0.pdf"}, {"id": "debbab855c250c003941fde1e799570f0b18faab", "title": "An effective Gabor-feature selection method for face recognition", "year": 2009, "pdf": null}, {"id": "0ecdbaf1d77b9c0f16c1d52210011415f7c6396f", "title": "An evaluation of Gabor orientation as a feature for face recognition", "year": 2008, "pdf": "http://figment.cse.usf.edu/~sfefilat/data/papers/MoAT7.3.pdf"}, {"id": "d3e980c3f88b98ccde525f2ddcc8df3abca1ca01", "title": "Binarized eigenphases applied to limited memory face recognition systems", "year": 2008, "pdf": "https://doi.org/10.1007/s10044-008-0129-7"}, {"id": "64699c4546b66323d4af7d1e44ddb5912991c00c", "title": "Design and Performance Evaluation of Robust Digital Audio Watermarking under Low Bits Rates", "year": 2015, "pdf": null}, {"id": "fa2f0577f07fdbe6076ccc6e5a9f54eab5247d53", "title": "Fast PCA-based face recognition on GPUs", "year": 2013, "pdf": "https://doi.org/10.1109/ICASSP.2013.6638138"}, {"id": "de4b5d00cba99d355f10eb1565b3644b4485cddd", "title": "Estimating in-plane rotation angle for face images from multi-poses", "year": 2013, "pdf": 
"https://doi.org/10.1109/CIBIM.2013.6607914"}, {"id": "749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7", "title": "A Modular Framework to Detect and Analyze Faces for Audience Measurement Systems", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/7493/82d19bfe9fb8d0c5e94d0c9b0a63ab531cb7.pdf"}, {"id": "dfe97eb3304660c252c94c7eed6fc1c0970191ce", "title": "Robust Discriminant Analysis of Gabor Feature for Face Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FSKD.2007.490"}, {"id": "21f7980a22300983e1cb0fa02a9c300045a08740", "title": "Methodological improvement on local Gabor face recognition based on feature selection and enhanced Borda count", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/763e/c7dc4de6eff81df5a56e4adc3331a93c926c.pdf"}, {"id": "f209ac3162ad9443f80dac257427f60913be78df", "title": "Dual LDA - an effective feature space reduction method for face recognition", "year": 2005, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2005.1577291"}, {"id": "477bbcb5655a9c64893207bb49032e87c06a05f2", "title": "Eleos: ExitLess OS Services for SGX Enclaves", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3064176.3064219"}, {"id": "619442b4e5503e80941ce7de87eb12da0cce2b23", "title": "Face image super-resolution with pose via nuclear norm regularized structural orthogonal Procrustes regression", "year": "2018", "pdf": "http://doi.org/10.1007/s00521-018-3826-1"}, {"id": "003f161768db4b70358971d40eb8e1ad535f001f", "title": "Face recognition from single sample based on human face perception", "year": 2009, "pdf": "http://ro.uow.edu.au/cgi/viewcontent.cgi?article=9459&context=infopapers"}, {"id": "dc4289ca959b36f745d8e4b9383b673e2b95db47", "title": "Infants\u2019 experience-dependent processing of male and female faces: Insights from eye tracking and event-related potentials", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/dc42/89ca959b36f745d8e4b9383b673e2b95db47.pdf"}, {"id": "25fce91ce1b974865506c14d2e4714d8db2672d1", "title": "Towards a Practical Face Recognition System: Robust Alignment and Illumination by Sparse Representation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/6c1c/3c54359bb082f0e2c51c1ca7daa52da5f57c.pdf"}, {"id": "0f00a04c4a8c92e070b50ab411df4cd31d2cbe97", "title": "Face Recognition with One Training Image per Person", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/0f00/a04c4a8c92e070b50ab411df4cd31d2cbe97.pdf"}, {"id": "e295c1aa47422eb35123053038e62e9aa50a2e3a", "title": "ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389"}, {"id": "4d4be112c180d5a4484fe6e17e506ad6e1853f08", "title": "Improving long range and high magnification face recognition: Database acquisition, evaluation, and enhancement", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/4d4b/e112c180d5a4484fe6e17e506ad6e1853f08.pdf"}, {"id": "711f6446d0b3897b892c5c19c7d88fe50424eef9", "title": "High and low performers differ in the use of shape information for face recognition.", "year": 2013, "pdf": null}, {"id": "3c47022955c3274250630b042b53d3de2df8eeda", "title": "Discriminant analysis with tensor representation", "year": 2005, "pdf": "http://research.microsoft.com/en-us/um/people/leizhang/paper/cvpr05-shuicheng-discriminant.pdf"}, {"id": "36018404263b9bb44d1fddaddd9ee9af9d46e560", "title": "Occluded Face Recognition by Using Gabor Features", "year": 2002, "pdf": 
"http://pdfs.semanticscholar.org/3601/8404263b9bb44d1fddaddd9ee9af9d46e560.pdf"}, {"id": "29aa9c557a46ad214a4236143d93072ab018841e", "title": "L1-Norm Distance Linear Discriminant Analysis Based on an Effective Iterative Algorithm", "year": 2018, "pdf": "https://doi.org/10.1109/TCSVT.2016.2596158"}, {"id": "2a628cc59bf229979ba4cb2530a7bea320aa3dc3", "title": "Measuring the degree of face familiarity based on extended NMF", "year": 2013, "pdf": "http://doi.acm.org/10.1145/2465780.2465782"}, {"id": "25afa24d85e693351bad795ee1c3e801d10c4a15", "title": "Anisotropic Gaussian Filters for Face Class Modeling", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/25af/a24d85e693351bad795ee1c3e801d10c4a15.pdf"}, {"id": "bcefb15246b1c9cea74a49a4ba1c990b6b97a19c", "title": "Review on the effects of age, gender, and race demographics on automatic face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s00371-017-1428-z"}, {"id": "0c4a139bb87c6743c7905b29a3cfec27a5130652", "title": "The FERET Verification Testing Protocol for Face Recognition Algorithms", "year": 1998, "pdf": "http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf"}, {"id": "018eaad53804a88d894a8370a94bb6db31a30ed4", "title": "ANN based human facial expression recognition in color images", "year": 2014, "pdf": null}, {"id": "1a66f37f37e4fbf9a8e657853933539256bbda88", "title": "Spectral Clustering by Joint Spectral Embedding and Spectral Rotation.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1a66/f37f37e4fbf9a8e657853933539256bbda88.pdf"}, {"id": "53d8946fd7dcc9e35ddf92096b372c8988c0cce9", "title": "Wavelet packet analysis for face recognition", "year": 2000, "pdf": "http://pdfs.semanticscholar.org/53d8/946fd7dcc9e35ddf92096b372c8988c0cce9.pdf"}, {"id": "ac6f23544161c723567812f96a4cd71ed47041fc", "title": "On solving the face recognition problem with one training sample per subject", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/ac6f/23544161c723567812f96a4cd71ed47041fc.pdf"}, {"id": "ba7c01e1432bffc2fcde824d0b0ebd25ad7238c3", "title": "Face Recognition Techniques : A Review", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ba7c/01e1432bffc2fcde824d0b0ebd25ad7238c3.pdf"}, {"id": "b664dc4902902b3753ea862378f878487b13cc22", "title": "Employing vector quantization algorithm in a transform domain for facial recognition", "year": 2016, "pdf": null}, {"id": "e8420101f54438859ca5cf85ecee01b265bb1568", "title": "Robust Generalized Low Rank Approximation of Matrices for image recognition", "year": 2016, "pdf": "https://doi.org/10.1109/ISSPIT.2016.7886035"}, {"id": "d944ff789af84cecc0a913da964e017408687d62", "title": "Image Parsing: Segmentation, Detection, and Recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/d944/ff789af84cecc0a913da964e017408687d62.pdf"}, {"id": "adefabe194863b4f764ec982e3120554165c841c", "title": "Radius based Block Local Binary Pattern on T-Zone Face Area for Face Recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/adef/abe194863b4f764ec982e3120554165c841c.pdf"}, {"id": "3c4014886b3aa01024e3a1e81823a95a57fcf215", "title": "Functional Alterations in Order Short-Term Memory Networks in Adults With Dyslexia.", "year": 2015, "pdf": null}, {"id": "229317fe9b066b6f6f4961920123a03d777dd283", "title": "A novel face recognition method based on IWLD and IWBC", "year": 2015, "pdf": "https://doi.org/10.1007/s11042-015-2623-4"}, {"id": "a7f26379346be7c7d4b5511bad7de534c689273e", "title": "Use of Machine Learning for Detection of Unaware Facial 
Recognition Without Individual Training", "year": 2017, "pdf": "https://doi.org/10.1109/ICMLA.2017.00-31"}, {"id": "fed593e37a3f06619e5443c4124db0620d064c51", "title": "2D facial landmark model design by combining key points and inserted points", "year": "2015", "pdf": "http://doi.org/10.1016/j.eswa.2015.06.015"}, {"id": "03d9ccce3e1b4d42d234dba1856a9e1b28977640", "title": "Facial Affect \"In-the-Wild\": A Survey and a New Database", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/03d9/ccce3e1b4d42d234dba1856a9e1b28977640.pdf"}, {"id": "a3dc109b1dff3846f5a2cc1fe2448230a76ad83f", "title": "Active Appearance Model and Pca Based Face Recognition System", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/a3dc/109b1dff3846f5a2cc1fe2448230a76ad83f.pdf"}, {"id": "403b4deb13545144a34bee855042877f8b0ed809", "title": "A comparative survey on supervised classifiers for face recognition", "year": 2014, "pdf": "https://doi.org/10.1109/CCST.2014.6987036"}, {"id": "18638b148424081fc36f63755feeee6e30285f87", "title": "Eye-verifier using ternary template for reliable eye detection in facial color images", "year": 2009, "pdf": "https://www.researchgate.net/profile/Hakil_Kim2/publication/251907813_Eye-verifier_using_ternary_template_for_reliable_eye_detection_in_facial_color_images/links/54016c480cf23d9765a495ef.pdf"}, {"id": "fb1919aeaa26f2c559268c52deb68463ee58abee", "title": "Local matching Gabor entropy weighted face recognition", "year": 2011, "pdf": "https://doi.org/10.1109/FG.2011.5771394"}, {"id": "9f5fbc661ce13d1f946f5d3c5b194aab71e0511b", "title": "Combining local similarity measures: summing, voting, and weighted voting", "year": 2005, "pdf": "https://doi.org/10.1109/ICSMC.2005.1571551"}, {"id": "699ee890e93384b386f1d44f011ee47bd8a17b07", "title": "A SVM-based model for the evaluation of biometric sample quality", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5949212"}, {"id": "783f22f9ad77e437438f24f2d0a7c1397468ec88", "title": "A New Quadratic Classifier Applied to Biometric Recognition", "year": "2002", "pdf": "https://pdfs.semanticscholar.org/783f/22f9ad77e437438f24f2d0a7c1397468ec88.pdf"}, {"id": "cdb61fc4841e68c574f87137d93d8c63aca4ad89", "title": "Gender-selective neural populations: evidence from event-related fMRI repetition suppression", "year": "2013", "pdf": "http://doi.org/10.1007/s00221-013-3429-0"}, {"id": "309e17e6223e13b1f76b5b0eaa123b96ef22f51b", "title": "Face recognition based on a 3D morphable model", "year": 2006, "pdf": "https://static.aminer.org/pdf/PDF/000/337/771/image_synthesis_and_face_recognition_based_on_d_face_model.pdf"}, {"id": "420921277310ea97a819e676f626dedec734d4c7", "title": "On the Dimensionality of Face Space", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.1033"}, {"id": "d5e1173dcb2a51b483f86694889b015d55094634", "title": "PCA and LDA in DCT domain", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/d5e1/173dcb2a51b483f86694889b015d55094634.pdf"}, {"id": "b1898f8fe31147779a841f56795a776db6699efd", "title": "Automatic Detection of the Optimal Acceptance Threshold in a Face Verification System", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/b189/8f8fe31147779a841f56795a776db6699efd.pdf"}, {"id": "7d8354627468f1cb236c9f6f42c317c9c09f0c85", "title": "A DCT-based Multimanifold face recognition method using single sample per person", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7d83/54627468f1cb236c9f6f42c317c9c09f0c85.pdf"}, {"id": 
"59c194bc84d604baf09241238dc47806a998df70", "title": "Building a post-compression region-of-interest encryption framework for existing video surveillance systems", "year": 2015, "pdf": "https://doi.org/10.1007/s00530-015-0473-6"}, {"id": "101c7d54194c02600865d7b0c638e6bfd428788c", "title": "Illumination-insensitive texture discrimination based on illumination compensation and enhancement", "year": "2014", "pdf": "http://doi.org/10.1016/j.ins.2014.01.019"}, {"id": "b65ed4a745168ba5a336a593635fcb3f3c3ae60d", "title": "Face recognition using holistic Fourier invariant features", "year": 2001, "pdf": "http://pdfs.semanticscholar.org/b65e/d4a745168ba5a336a593635fcb3f3c3ae60d.pdf"}, {"id": "bcaa5fab589d95890d539a3119657fa253176f0d", "title": "Evaluating the Efficiency of a Night-Time, Middle-Range Infrared Sensor for Applications in Human Detection and Recognition", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/bcaa/5fab589d95890d539a3119657fa253176f0d.pdf"}, {"id": "fb36f2badc18977f469fb8b8be54a4a6f9ba1068", "title": "Nearest Feature Line: A Tangent Approximation", "year": 2008, "pdf": null}, {"id": "6974449ce544dc208b8cc88b606b03d95c8fd368", "title": "Local Evidence Aggregation for Regression-Based Facial Point Detection", "year": 2013, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/martinezvalstar-pami_final.pdf"}, {"id": "7373584a27320824dc9bc0bae58364f701a1210b", "title": "Collaborative Sparse Preserving Projections for Feature Extraction", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICSS.2016.41"}, {"id": "a8e785333eda0b3041c7b331ee76164f563fba7d", "title": "Face Demorphing", "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2777340"}, {"id": "708161b78b049fe9891a8fb2b2e6cd1588591bf0", "title": "Extraction of Craniofacial Landmarks for Preoperative to Intraoperative Registration", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/7835/9b9764cc7e2276c63892746d457d25a68864.pdf"}, {"id": "ff398e7b6584d9a692e70c2170b4eecaddd78357", "title": "Title of dissertation : FACE RECOGNITION AND VERIFICATION IN UNCONSTRAINED ENVIRIONMENTS", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ff39/8e7b6584d9a692e70c2170b4eecaddd78357.pdf"}, {"id": "31daab398f84819a98c966801a3795140d080c5f", "title": "Face Recognition Based on PLS and HMM", "year": 2009, "pdf": null}, {"id": "db9ef28cc3531a27c273d769e1b1d6b8aeff2db4", "title": "Linguistic descriptors in face recognition: A literature survey and the perspectives of future development", "year": 2015, "pdf": null}, {"id": "b0b49b1626a9c33843eff609fdb42decdd1f8b90", "title": "A classification of emotion and gender using approximation image Gabor local binary pattern", "year": 2017, "pdf": null}, {"id": "af36ce6d1f2cbc61dff80526c530b714f797cc50", "title": "Performance Comparison of Major Classical Face Recognition Techniques", "year": 2014, "pdf": "https://doi.org/10.1109/ICMLA.2014.91"}, {"id": "23adc523fc790aa0ca450f73a44b71a792bdc85b", "title": "Classification in an informative sample subspace", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/23ad/c523fc790aa0ca450f73a44b71a792bdc85b.pdf"}, {"id": "3687bad2caa2d323941e6ec343e9156fca9cf606", "title": "Super Resolution of Images and Video", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/3687/bad2caa2d323941e6ec343e9156fca9cf606.pdf"}, {"id": "c83dba889132f0d2b909474e5e187f254bd09e29", "title": "Fourier Power Spectrum Characteristics of Face Photographs: Attractiveness Perception Depends on Low-Level Image Properties", "year": 2015, "pdf": 
"http://pdfs.semanticscholar.org/c83d/ba889132f0d2b909474e5e187f254bd09e29.pdf"}, {"id": "db326135c998b7f1f6ae61de015f09e90206d3ff", "title": "Real-time integrated face detection and recognition on embedded GPGPUs", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6962350"}, {"id": "2862d405f9d1e957076195b0fefd1bdfe480ca8b", "title": "Parallel implementation of eigenface on CUDA", "year": 2014, "pdf": null}, {"id": "72fb849144aea8e01f141419914cde87bb7972f3", "title": "Fuzzy Bidirectional Weighted Sum for Face Recognition", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/72fb/849144aea8e01f141419914cde87bb7972f3.pdf"}, {"id": "0480b458439069687ec41c90178ba7e9a056bcca", "title": "Gender Classification Using Gradient Direction Pattern", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/0480/b458439069687ec41c90178ba7e9a056bcca.pdf"}, {"id": "3172fac25c7bd516867933487b8f04c0e23b4f43", "title": "On-line learning parts-based representation via incremental orthogonal projective non-negative matrix factorization", "year": 2013, "pdf": "https://doi.org/10.1016/j.sigpro.2012.07.015"}, {"id": "4967b0acc50995aa4b28e576c404dc85fefb0601", "title": "An Automatic Face Detection and Gender Classification from Color Images using Support Vector Machine", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/4967/b0acc50995aa4b28e576c404dc85fefb0601.pdf"}, {"id": "916318d7fb755b13de566a8a21625d47541d0d3d", "title": "Efficient face candidates selector for face detection", "year": "2003", "pdf": "http://doi.org/10.1016/S0031-3203%2802%2900165-6"}, {"id": "ca30c8f28642303e8f9095fa5d1e908f194cd586", "title": "Nonparametric maximum margin criterion for face recognition", "year": 2005, "pdf": "https://doi.org/10.1109/ICIP.2005.1530206"}, {"id": "761db68bf6031545cc865a813f398cb4ee8f61a1", "title": "Feature to Feature Matching for LBP Based Face Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/761d/b68bf6031545cc865a813f398cb4ee8f61a1.pdf"}, {"id": "8768b8a972debe697ee5f8c3bd67a6df6ff44977", "title": "A Comparative Study on Image Hashing for Document Authentication", "year": 2015, "pdf": "http://subs.emis.de/LNI/Proceedings/Proceedings245/231.pdf"}, {"id": "27d2e977356915c63c4562fe41df9e9ed0290f15", "title": "A Hierarchical Compositional Model for Face Representation and Sketching", "year": 2008, "pdf": "http://www.stat.ucla.edu/~sczhu/papers/PAMI_face_sketch.pdf"}, {"id": "6b4d1c0ddf606c84148edd889db231f67703ef3e", "title": "A comparison of techniques for robust gender recognition", "year": 2011, "pdf": "https://doi.org/10.1109/ICIP.2011.6116610"}, {"id": "d8d274e679d8ead02a38fb2d6b3c100ccf99fdde", "title": "An algorithm for training a large scale support vector machine for regression based on linear programming and decomposition methods", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/d8d2/74e679d8ead02a38fb2d6b3c100ccf99fdde.pdf"}, {"id": "e3a3a6c1f4802ea1cd0c34d0b34e4c83689895ac", "title": "An effective unconstrained correlation filter and its kernelization for face recognition", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e3a3/a6c1f4802ea1cd0c34d0b34e4c83689895ac.pdf"}, {"id": "3cc3e01ac1369a0d1aa88fedda61d3c99a98b890", "title": "Discriminative Feature Co-Occurrence Selection for Object Detection", "year": 2008, "pdf": "http://mi.eng.cam.ac.uk/~bdrs2/papers/mita_pami08.pdf"}, {"id": "23a2b75c92123b3e7bbaf1d98e434845167fe259", "title": "Multimodal Biometrics for Identity Documents", "year": 2005, "pdf": 
"http://pdfs.semanticscholar.org/b673/2510242b637055de29ad59e3646b66638220.pdf"}, {"id": "d930ec59b87004fd172721f6684963e00137745f", "title": "Face Pose Estimation using a Tree of Boosted Classifiers", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/d930/ec59b87004fd172721f6684963e00137745f.pdf"}, {"id": "e59adfa9ff6377f54a5514701cc783cf8ff79628", "title": "Boosting non-graph matching feature-based face recognition with a multi-stage matching strategy", "year": 2017, "pdf": "https://doi.org/10.1142/S0219691317500175"}, {"id": "0c9b266bc2573e7266636c88398edf3895268777", "title": "Cross-Layer Optimization and Effective Airtime Estimation for Wireless Video Streaming", "year": 2012, "pdf": "https://doi.org/10.1109/ICCCN.2012.6289275"}, {"id": "2a2df7e790737a026434187f9605c4763ff71292", "title": "Towards nonuniform illumination face enhancement via adaptive contrast stretching", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4665-2"}, {"id": "e072acd8a041b90cd86c39f90f235eea7feb37cf", "title": "Projection-optimal tensor local fisher discriminant analysis for image feature extraction", "year": 2013, "pdf": "https://doi.org/10.1109/ICIP.2013.6738587"}, {"id": "a6f13ef1eaa93b8532be9a36815e1cfa25efec6d", "title": "Representation of multiple objects in macaque category-selective areas", "year": "2018", "pdf": "http://doi.org/10.1038/s41467-018-04126-7"}, {"id": "9c81d436b300494bc88d4de3ac3ec3cc9c43c161", "title": "Discriminative unsupervised 2D dimensionality reduction with graph embedding", "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-5019-9"}, {"id": "4ee2421bdbb71712522b0db86864fa9ac1474582", "title": "Privacy &amp; Security Issues Related to Match Scores", "year": 2006, "pdf": null}, {"id": "32420c65f8ef0c5bd83b14c8ae662cbce73e6781", "title": "Face Recognition with Local Binary Patterns", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/3242/0c65f8ef0c5bd83b14c8ae662cbce73e6781.pdf"}, {"id": "1505a18901aeffeeba6aa48c54775a2b2cc67579", "title": "Unified probabilistic models for face recognition from a single example image per person", "year": 2004, "pdf": "https://doi.org/10.1007/BF02944908"}, {"id": "e3f6108163b64ca4aa94d9be77c753b377fcda87", "title": "Using Competitive Prototypes for the Verification on Unspecific Person", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/e3f6/108163b64ca4aa94d9be77c753b377fcda87.pdf"}, {"id": "5f11893a16c0c2c6b685befe4ac23e86c9e9f59f", "title": "Decremental generalized discriminative common vectors applied to images classification", "year": 2017, "pdf": "https://doi.org/10.1016/j.knosys.2017.05.020"}, {"id": "e5c468c859faf03954d9440fa33b913d01c65141", "title": "Retina alap\u00fa mintav\u00e9telez\u00e9s arckomponens detekt\u00e1l\u00e1si feladaton", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/e5c4/68c859faf03954d9440fa33b913d01c65141.pdf"}, {"id": "3f5cf3771446da44d48f1d5ca2121c52975bb3d3", "title": "All the Images of an Outdoor Scene", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/3f5c/f3771446da44d48f1d5ca2121c52975bb3d3.pdf"}, {"id": "140eaf273eabe233f67257d1ae7ee44a8f21e502", "title": "Computational Maps in the Visual Cortex", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/617b/db84d9c07becd955fedbb98c1cd3109502fe.pdf"}, {"id": "b7cf7bb574b2369f4d7ebc3866b461634147041a", "title": "From NLDA to LDA/GSVD: a modified NLDA algorithm", "year": 2011, "pdf": "https://doi.org/10.1007/s00521-011-0728-x"}, {"id": "2ab214d5967f964dda4e6610a9694dc4bcb44550", "title": "Recursive Bayesian Linear Regression 
for Adaptive Classification", "year": 2009, "pdf": null}, {"id": "c599e49afcfc0aa4910cec58b5fc1198153514bc", "title": "A geometrical-model-based face recognition", "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351375"}, {"id": "614524b27188bb8869ec7a5b374c2a9874f96ec5", "title": "A new covariance estimate for Bayesian classifiers in biometric recognition", "year": 2004, "pdf": "http://fei.edu.br/~cet/csvt04.pdf"}, {"id": "4b144a0a5f8b58d3bf311c121e9cf79fbf91cc52", "title": "From classifiers to discriminators: A nearest neighbor rule induced discriminant analysis", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/4b14/4a0a5f8b58d3bf311c121e9cf79fbf91cc52.pdf"}, {"id": "dcad4d6a6517dd2f76dfe3c8539333d9e40ab90c", "title": "Set Face Recognition Using Transduction", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/dcad/4d6a6517dd2f76dfe3c8539333d9e40ab90c.pdf"}, {"id": "a3334cdf92d1157dd691d88189e793d18164cd7b", "title": "Precise eye detection on frontal view face image", "year": 2009, "pdf": "http://doi.acm.org/10.1145/1734605.1734619"}, {"id": "2acf319c5eac89cc9e0ed24633e4408dbd4a8a5b", "title": "The Effect of Distance Measures on the Recognition Rates of PCA and LDA Based Facial Recognition", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/2acf/319c5eac89cc9e0ed24633e4408dbd4a8a5b.pdf"}, {"id": "f60311fc10d41aaf672b9ffccb815e3d5300678d", "title": "Curvelet-based feature extraction with B-LDA for face recognition", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AICCSA.2009.5069362"}, {"id": "db67edbaeb78e1dd734784cfaaa720ba86ceb6d2", "title": "SPECFACE \u2014 A dataset of human faces wearing spectacles", "year": "2016", "pdf": "https://arxiv.org/pdf/1509.04853.pdf"}, {"id": "53b8288f71340a98c2531cd3e152c94613a38cae", "title": "Weighted group sparse representation for undersampled face recognition", "year": 2014, "pdf": "https://doi.org/10.1016/j.neucom.2014.05.012"}, {"id": "6cd497a9a66daa2d0c867dc07412d958791f3499", "title": "Speeding Up Permutation Based Indexing with Indexing", "year": 2009, "pdf": "http://www.cs.uku.fi/~fredriks/pub/papers/sisap09.pdf"}, {"id": "036f2001767de3d9b6b396d2d829f8f030b056b3", "title": "Super-Resolution Method for Multiview Face Recognition From a Single Image Per Person Using Nonlinear Mappings on Coherent Features", "year": 2012, "pdf": "https://doi.org/10.1109/LSP.2012.2186961"}, {"id": "3f4f1cabe336948b80c819b910581e0f338c4b4a", "title": "Gender classification of full body images based on the convolutional neural network", "year": 2017, "pdf": "https://doi.org/10.1109/SPAC.2017.8304366"}, {"id": "12c708a709480722aae9324648d0404ec55f151e", "title": "Continuous recognition with incremental learning on Grassmann manifolds", "year": 2017, "pdf": null}, {"id": "514fb60de79497ffb4b9490e12b11a9c94036ec3", "title": "Discriminant Locality Preserving Projections Based on L1-Norm Maximization", "year": 2014, "pdf": "https://doi.org/10.1109/TNNLS.2014.2303798"}, {"id": "35e4b6c20756cd6388a3c0012b58acee14ffa604", "title": "Gender Classification in Large Databases", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf"}, {"id": "42350e28d11e33641775bef4c7b41a2c3437e4fd", "title": "Multilinear Discriminant Analysis for Face Recognition", "year": 2007, "pdf": "http://mmlab.ie.cuhk.edu.hk/archive/2007/IP07_face02.pdf"}, {"id": "0e0bfe54d1f6ff1ad831c552894ebed09e3e485d", "title": "Face recognition using improved local line binary pattern", "year": 2012, "pdf": 
"https://doi.org/10.1117/12.968566"}, {"id": "1e472cf9a290e8f59573628dba426cd6d74411f4", "title": "A Large-Scale Software-Generated Face Composite Sketch Database", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736902"}, {"id": "ed12b0a9d2bf6316b32a2bd143f33a85582dc91b", "title": "Determining Optimal Malsburg Gabor Kernel for Efficient Non-Rigid Object Recognition", "year": 2007, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FBIT.2007.111"}, {"id": "d0318d0e1f87c16e1e4718b8166743f617c62a47", "title": "Null space-based LDA with weighted dual personal subspaces for face recognition", "year": 2005, "pdf": "https://doi.org/10.1109/ICIP.2005.1530210"}, {"id": "4ef4553de88191fd46ec8000589c3fc1988f06b5", "title": "The Application of Decision Tree in Gender Classification", "year": 2008, "pdf": null}, {"id": "24d3e695af619e88613aba7dc0e7492c12fa4d0e", "title": "Sparsest Matrix based Random Projection for Classification", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/24d3/e695af619e88613aba7dc0e7492c12fa4d0e.pdf"}, {"id": "aae29ef5ab6f9f24e8ab7985386c23294d2343cc", "title": "Supervised neighborhood regularized collaborative representation for face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4851-2"}, {"id": "ceaf24a1c756938125ef5865ec2183730385d7ea", "title": "Discussions on Some Problems in Face Recognition", "year": 2004, "pdf": "https://doi.org/10.1007/978-3-540-30548-4_7"}, {"id": "50b290b0b2d14a3f0f9cfd17b15d87b91d76bff1", "title": "Gender classification using spatial and temporal features", "year": 2013, "pdf": null}, {"id": "e825811cfd92a8be5be2caee67fc7a48ed2f5df0", "title": "Meta-Analysis of Face Recognition Algorithms", "year": "2002", "pdf": "https://pdfs.semanticscholar.org/e825/811cfd92a8be5be2caee67fc7a48ed2f5df0.pdf"}, {"id": "831cbffbfe39a059b1212d49e8fdfd458d1d01c5", "title": "Globally Maximizing, Locally Minimizing: Unsupervised Discriminant Projection with Applications to Face and Palm Biometrics", "year": 2007, "pdf": "http://ira.lib.polyu.edu.hk/bitstream/10397/219/1/264.pdf"}, {"id": "95de749dd1c3451d0842ecf33101244a1fa9d4af", "title": "Temporal Dynamics Underlying the Modulation of Social Status on Social Attention", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/95de/749dd1c3451d0842ecf33101244a1fa9d4af.pdf"}, {"id": "c70ea40140e2f73e78d9d54335fbacce46d4d0b1", "title": "Enhanced collaborative representation based Classification", "year": 2014, "pdf": null}, {"id": "33648b41b4c9a776dc059bee96d6df994009828c", "title": "Memory and Perception-based Facial Image Reconstruction", "year": 2017, "pdf": null}, {"id": "7438582019689ce42ba4427a24bb7295438f82b8", "title": "A VQ-based fast face recognition algorithm using optimized codebook", "year": 2008, "pdf": null}, {"id": "124476c2815bbfb523c77943c74356f94f79b580", "title": "Recognition of Faces in Unconstrained Environments: A Comparative Study", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/c86c/28b45687ce549cc449e84c6a7087012c21f7.pdf"}, {"id": "756083723f91af82e55b8ad6b4ea7965d46c4ca8", "title": "A low complexity and efficient face recognition approach in JPEG compressed domain using quantized coefficients", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5734128"}, {"id": "4a9db3fd76ad7d2d95f862d50e97247f2c16502a", "title": "A FPGA implementation of facial feature extraction", "year": 2012, "pdf": "https://doi.org/10.1007/s11554-012-0263-8"}, {"id": "726fc899b4b1ebbd607e6a0934e86383e5675440", "title": "Event-Related Potential 
Correlates of Long-Term Memory for Briefly Presented Faces", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/726f/c899b4b1ebbd607e6a0934e86383e5675440.pdf"}, {"id": "478f1dd9eb44b8de17fc3a7a0e45861fe7bf0fe3", "title": "An efficient face recognition for variant illumination condition", "year": 2004, "pdf": null}, {"id": "74c8116d647612e8cd20a2528eeed38f76d09126", "title": "Measuring measures for face sample quality", "year": 2011, "pdf": null}, {"id": "c126f053855396f9e1ac1a408201d50d5280f79f", "title": "FPGA implementation of an embedded face detection system based on LEON3", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/c126/f053855396f9e1ac1a408201d50d5280f79f.pdf"}, {"id": "7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364", "title": "Enhanced independent component analysis and its application to content based face image retrieval", "year": "2004", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1275543"}, {"id": "fd6d4673178cbecba3b2535c740f28cbdf901f13", "title": "Local Directional Pattern (LDP) – A Robust Image Descriptor for Object Recognition", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2010.17"}, {"id": "2e2935a7489ae55fe36af6980523f8d587c18935", "title": "On testing methods for biometric authentication", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/2e29/35a7489ae55fe36af6980523f8d587c18935.pdf"}, {"id": "d31328b12eef33e7722b8e5505d0f9d9abe2ffd9", "title": "Deep Unsupervised Domain Adaptation for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866"}, {"id": "59ec9bef0d331444db7d763960095213eecb3b20", "title": "Invariant Face Recognition in a Network of Cortical Columns", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/59ec/9bef0d331444db7d763960095213eecb3b20.pdf"}, {"id": "bc8373b3d4110786a597b21f3ae9c8e5ffd34a2e", "title": "Optimal Gabor kernel location selection for face recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/bc83/73b3d4110786a597b21f3ae9c8e5ffd34a2e.pdf"}, {"id": "a432f815d753121267ffb524f8fabac21be32733", "title": "Proyecto Aguara\u0301: Automatic Face Recognition", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/a432/f815d753121267ffb524f8fabac21be32733.pdf"}, {"id": "3909f51a6850436d7e1e27ca2a16d6db49773870", "title": "Using classifier ensembles to label spatially disjoint data", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/3909/f51a6850436d7e1e27ca2a16d6db49773870.pdf"}, {"id": "95738166f0e8e66af1b23e28af2a6834c86e12e0", "title": "Local binary pattern feature vector extraction with CNN", "year": 2005, "pdf": null}, {"id": "00d931eccab929be33caea207547989ae7c1ef39", "title": "The Natural Input Memory Model", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/00d9/31eccab929be33caea207547989ae7c1ef39.pdf"}, {"id": "3e72a9a5b7d184b72771ea4519cf9fa0253cdecd", "title": "Spatial maps in frontal and prefrontal cortex.", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/3e72/a9a5b7d184b72771ea4519cf9fa0253cdecd.pdf"}, {"id": "cee80d22cbadad649424bb60a548fad7fb437e68", "title": "Authentication enhancement techniques for BAC in 2G E-passport", "year": 2016, "pdf": null}, {"id": "ec1a57e609eda72b4eb60155fac12db1da31f6c0", "title": "Probabilistic Linear Discriminant Analysis", "year": 2006, "pdf": "https://doi.org/10.1007/11744085_41"}, {"id": "75f0d2d8aae00da73e4122c712ad230cfebe7729", "title": "A Massively Parallel Face Recognition System", "year": 2007, "pdf": 
"http://pdfs.semanticscholar.org/75f0/d2d8aae00da73e4122c712ad230cfebe7729.pdf"}, {"id": "2eb610d67ac07136fce4d9633edc28548aab76c8", "title": "The Good, the Bad, and the Ugly Face Challenge Problem", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/580f/c690470f513bfa74b433c9a1f073a288fac1.pdf"}, {"id": "8d5945ef2361511a17719c9efe9e2d005247029e", "title": "Look Ma! No Network!: PCA of Gabor Filters Models the Development of Face Discrimination", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/8d59/45ef2361511a17719c9efe9e2d005247029e.pdf"}, {"id": "7be351e731eb9c3b71ad0c2a47ee8d300f7049be", "title": "Recognition for Objects by Relationships Between Attributes", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/7be3/51e731eb9c3b71ad0c2a47ee8d300f7049be.pdf"}, {"id": "157f95439cdc50f0903d59e406964dc2bc4b4d1e", "title": "Efficiency of Recognition Methods for Single Sample per Person Based Face Recognition", "year": "2011", "pdf": "http://doi.org/10.5772/18432"}, {"id": "d31e47f45041736c93ec23ba1dbaef6c311e76d6", "title": "TU\u0308BI\u0307TAK UZAY at TRECVID 2009: High-Level Feature Extraction and Content-Based Copy Detection", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/d31e/47f45041736c93ec23ba1dbaef6c311e76d6.pdf"}, {"id": "0d47b2b65c5c42291e90c591212e4e0b574a95c1", "title": "An adaptive hybrid pattern for noise-robust texture analysis", "year": 2015, "pdf": "https://doi.org/10.1016/j.patcog.2015.01.001"}, {"id": "22b1c7cc6ba65c5b5274ce4e8f017a6c1599c088", "title": "Affine normalized stockwell transform based face recognition", "year": 2015, "pdf": null}, {"id": "5a93f9084e59cb9730a498ff602a8c8703e5d8a5", "title": "Face Recognition using Local Quantized Patterns", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/5a93/f9084e59cb9730a498ff602a8c8703e5d8a5.pdf"}, {"id": "4d848b2055e1bba4ce80d5d050879c26686eda50", "title": "Face recognition with enhanced privacy protection", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICASSP.2009.4959726"}, {"id": "077492a77812a68c86b970557e97a452a6689427", "title": "Automatic 3D face reconstruction from single images or video", "year": 2008, "pdf": "http://is.tuebingen.mpg.de/fileadmin/user_upload/files/publications/FG2008-Breuer_%5B0%5D.pdf"}, {"id": "247df1d4fca00bc68e64af338b84baaecc34690b", "title": "Evaluation of Gender Classification Methods with Automatically Detected and Aligned Faces", "year": 2008, "pdf": "http://www.cmlab.csie.ntu.edu.tw/~ked/review.pdf"}, {"id": "7252b7146697e7a7e33bddb5c49f18af2d0fc9db", "title": "Learning to See : Genetic and Environmental Influences on Visual Development", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/7252/b7146697e7a7e33bddb5c49f18af2d0fc9db.pdf"}, {"id": "c41afaf882de1c7a94c05f881a7416bb60cfd0e8", "title": "Sex, cheating, and disgust: enhanced source memory for trait information that violates gender stereotypes.", "year": 2013, "pdf": null}, {"id": "06cdd818d3908e9fa05381a069bb99596c17ab33", "title": "Regularized Locality Preserving Projections and Its Extensions for Face Recognition", "year": 2010, "pdf": "https://doi.org/10.1109/TSMCB.2009.2032926"}, {"id": "3607afdb204de9a5a9300ae98aa4635d9effcda2", "title": "Face Description with Local Binary Patterns: Application to Face Recognition", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244"}, {"id": "96fda9a7396a4d61d91e927ce885d4a32d6f7f01", "title": "Face Recognition Via Weighted Two Phase Test Sample Sparse Representation", "year": 2013, "pdf": 
"https://doi.org/10.1007/s11063-013-9333-6"}, {"id": "45c824c25e66b7bc1dd474f80cf2b0056b4fa6f8", "title": "Selection of Location, Frequency, and Orientation Parameters of 2D Gabor Wavelets for Face Recognition", "year": 2003, "pdf": "http://pdfs.semanticscholar.org/45c8/24c25e66b7bc1dd474f80cf2b0056b4fa6f8.pdf"}, {"id": "c59817e6fa375f715420d88ce024777b2961360d", "title": "Strong, Neutral, or Weak: Exploring the Impostor Score Distribution", "year": 2015, "pdf": "https://doi.org/10.1109/TIFS.2015.2403136"}, {"id": "0cdb9f0428721f848ff676ecadfaeb99896cbd13", "title": "Boosting part-sense multi-feature learners toward effective object detection", "year": 2011, "pdf": "https://doi.org/10.1016/j.cviu.2010.11.006"}, {"id": "880d024d830fd33c44d9c5e37a9c257966c27fe2", "title": "Super-Resolution Method for Face Recognition Using Nonlinear Mappings on Coherent Features", "year": 2011, "pdf": "https://doi.org/10.1109/TNN.2010.2089470"}, {"id": "3cca51f66b9693bc188557abe8a628580ae62aa2", "title": "The influence of perceptual similarity and individual differences on false memories in aging", "year": "2018", "pdf": "https://www.sciencedirect.com/science/article/pii/S0197458017303597?dgcid=api_sd_search-api-endpoint"}, {"id": "b14fb84db85ab4bbbcc8c92200e9c69b00737f91", "title": "A Review on Classifiers Used in Face Recognition Methods under Pose and Illumination Variation", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b14f/b84db85ab4bbbcc8c92200e9c69b00737f91.pdf"}, {"id": "69f638309fe692f7d57a72d2df8fe2bf1d81dff4", "title": "A Study of Artificial Personality from the Perspective of the Observer", "year": 2004, "pdf": "http://pdfs.semanticscholar.org/69f6/38309fe692f7d57a72d2df8fe2bf1d81dff4.pdf"}, {"id": "77acb8847a76bfcc925f45387fb7abd4f2bd38ac", "title": "A novel polar-based human face recognition computational model.", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/77ac/b8847a76bfcc925f45387fb7abd4f2bd38ac.pdf"}, {"id": "2f1c3257ced690a0e1c38ddf7792e36b24fcc8fb", "title": "Selective local texture features based face recognition with single sample per class", "year": 2011, "pdf": "https://doi.org/10.1007/s13173-011-0049-z"}, {"id": "eefe8bd6384f565d2e42881f1f9a468d1672989d", "title": "Agent Based Object Recognition System for Autonomous Surveillance in Dynamic Environments", "year": 2010, "pdf": null}, {"id": "254d4206f4a68862e6e6c45b0938868e84cc67d0", "title": "Comparison of face image quality metrics: Electronic and legacy mug shots", "year": 2011, "pdf": "https://doi.org/10.1109/CIBIM.2011.5949219"}, {"id": "c28f57d0a22e54fdd3c4a57ecb1785dda49f0e5e", "title": "From Scores to Face Templates: A Model-Based Approach", "year": 2007, "pdf": "http://marathon.csee.usf.edu/~sarkar/PDFs/Scores%20to%20templates.pdf"}, {"id": "326611cbfaea5a413c6f9496bf96b27267a7f273", "title": "“Whitenedfaces” Recognition With PCA and ICA", "year": 2007, "pdf": null}, {"id": "e0611abbdfac10d1d9183a437f027094a26c0cfd", "title": "Two-step Feature Extraction in a Transform domain for face recognition", "year": 2017, "pdf": null}, {"id": "c9fddfbb8fd4b83421cddfc1495dfb4bb1f9ddbd", "title": "A massively parallel algorithm for local binary pattern based face recognition", "year": 2006, "pdf": "https://doi.org/10.1109/ISCAS.2006.1693438"}, {"id": "1d126e871e81dc2828685cf6112cb955bd9aae15", "title": "Driver monitoring for a human-centered driver assistance system", "year": 2006, "pdf": null}, {"id": "cac24ea3b301c4aba0f84470b175198c10d03199", "title": "Study of the Changing Trends in Facial Expression Recognition", 
"year": "2011", "pdf": "https://pdfs.semanticscholar.org/f411/e3c58f2036b3c1e1c251e7143ecd435e4b91.pdf"}, {"id": "ccf05c83dc1d9cfe2ef34d44e993980906720548", "title": "Gabor-Eigen-Whiten-Cosine: A Robust Scheme for Face Recognition", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/ccf0/5c83dc1d9cfe2ef34d44e993980906720548.pdf"}, {"id": "d5194642b3c68920e9fbf9839f5704db5a39ae9e", "title": "Real-time gender recognition with unaligned face images", "year": 2010, "pdf": null}, {"id": "299eb74b2a553c6ff1e3d756a19cfe6ac4b133a0", "title": "Face recognition by independent component analysis", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/8d3c/93a160e6fc71ca4462f0e0a25bc75f4debae.pdf"}, {"id": "625fe925c88f642d8e082fc51c851362f94fafd2", "title": "Face Recognition via AAM and Multi-features Fusion on Riemannian Manifolds", "year": "2009", "pdf": "http://doi.org/10.1007/978-3-642-12297-2_57"}, {"id": "aa17a5e76ae21d0d068c7f10a4bb24c19888b685", "title": "SPARSE BAYESIAN LEARNING IN CLASSIFYING FACE FEATURE VECTORS", "year": "2008", "pdf": "https://pdfs.semanticscholar.org/aa17/a5e76ae21d0d068c7f10a4bb24c19888b685.pdf"}, {"id": "bc9c4b5379469a03415b5b35f8b627b2fbdbe967", "title": "Face Illumination Processing Using Wavelet Transform and Gradientfaces", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/bc9c/4b5379469a03415b5b35f8b627b2fbdbe967.pdf"}, {"id": "e181aca6e4b7142d2254a93477170e75c335d616", "title": "A Combined SIFT / SURF Descriptor for Automatic Face Recognition", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/e181/aca6e4b7142d2254a93477170e75c335d616.pdf"}, {"id": "88740da3333ec14281a6defb55fff7c28025a758", "title": "Gender and gaze gesture recognition for human-computer interaction", "year": "2016", "pdf": "http://doi.org/10.1016/j.cviu.2016.03.014"}, {"id": "55c40cbcf49a0225e72d911d762c27bb1c2d14aa", "title": "Indian Face Age Database : A Database for Face Recognition with Age Variation", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf"}, {"id": "49612f6110f6863159fbd0e0be1253845102f0ff", "title": "Adaptive Feature Extraction Algorithm using Mixed Transforms for Facial Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8623830"}, {"id": "a83f7fd5b94303a42942e53299b1b369d7756ee1", "title": "Biometric image processing and recognition", "year": 1998, "pdf": "http://ieeexplore.ieee.org/document/7089743/"}, {"id": "f2cb0107e8de2b593447f878684f4d274ac4c338", "title": "Local binary pattern based face recognition with automatically detected fiducial points", "year": "2016", "pdf": "http://doi.org/10.3233/ICA-150506"}, {"id": "48af26cd5532b421af8c69238ff15c8252419e41", "title": "Lateral frontal pole and relational processing: Activation patterns and connectivity profile", "year": "2017", "pdf": "http://www.sciencedirect.com/science/article/pii/S0166432817300931"}, {"id": "3c5a43b280588b0e1883ff0ef87c10d81b8c09c3", "title": "Statistical non-uniform sampling of Gabor wavelet coefficients for face recognition", "year": 2005, "pdf": null}, {"id": "ed3084c2545f1c9ea6a95e5dbd95bbe74531da27", "title": "Efficient facial recognition using vector quantization of 2D DWT features", "year": 2016, "pdf": "https://doi.org/10.1109/ACSSC.2016.7869077"}, {"id": "de6ba16ee8ad07e2f02d685b1e98b8be5045cb1b", "title": "Adaptive discriminant learning for face recognition", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/de6b/a16ee8ad07e2f02d685b1e98b8be5045cb1b.pdf"}, {"id": 
"9e1a9d55a1760ca1a7007cf0d46416182dd4717f", "title": "Random translational transformation for changeable face verification", "year": 2009, "pdf": null}, {"id": "5a547df635a9a56ac224d556333d36ff68cbf088", "title": "Cross Local Gabor Binary Pattern Descriptor with Probabilistic Linear Discriminant Analysis for Pose-Invariant Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359041"}, {"id": "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "title": "Cross-Database Evaluation of Normalized Raw Pixels for Gender Recognition under Unconstrained Settings", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.542"}, {"id": "60529952f6346ebe26a3d4e5fdf79a925d68621f", "title": "Towards a Generalized Eigenspace-Based Face Recognition Framework", "year": 2002, "pdf": "http://pdfs.semanticscholar.org/6052/9952f6346ebe26a3d4e5fdf79a925d68621f.pdf"}, {"id": "987b49719617981c0f1a6d12134f660675144632", "title": "Robust real-time face recognition", "year": 2013, "pdf": "http://doi.acm.org/10.1145/2513456.2513494"}, {"id": "e4c3d5d43cb62ac5b57d74d55925bdf76205e306", "title": "Average Biased ReLU Based CNN Descriptor for Improved Face Retrieval", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e4c3/d5d43cb62ac5b57d74d55925bdf76205e306.pdf"}, {"id": "e387db84cd31f14e468bb329ac008a80e645400e", "title": "Forensic Face Photo-Sketch Recognition Using a Deep Learning-Based Architecture", "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2749266"}, {"id": "590630990cf014f8c30296bc7a622d9dccc43163", "title": "Recognition of expression variant faces using masked log-Gabor features and Principal Component Analysis", "year": 2006, "pdf": "http://pdfs.semanticscholar.org/8369/f381e7eec92249a949f7b6ba6e9efb4468f2.pdf"}, {"id": "69295d7e773c90473b45becf921bae4c975f3a7e", "title": "A novel energy based filter for cross-blink eye detection", "year": 2012, "pdf": "https://doi.org/10.1109/ICIP.2012.6467242"}, {"id": "067e2b673e75a25c47cb0a5528dad41c02402251", "title": "Interactive Retrieval in Facial Image Database Using Self-Organizing Maps", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/bc52/77af56b1c5186304d57c367d91079ffb06f8.pdf"}, {"id": "0c889bfb0b62e051085528a252c5b837843674b5", "title": "An Evacuation Decision Model based on perceived risk, social influence and behavioural uncertainty", "year": 2016, "pdf": "https://doi.org/10.1016/j.simpat.2016.03.006"}, {"id": "0e4baf74dfccef7a99c6954bb0968a2e35315c1f", "title": "Gender identification from face images", "year": 2012, "pdf": "https://doi.org/10.1109/SIU.2012.6204517"}, {"id": "dd8ad6ce8701d4b09be460a6cf058fcd5318c700", "title": "Robust Face Recognition for Uncontrolled Pose and Illumination Changes", "year": 2013, "pdf": "https://doi.org/10.1109/TSMCA.2012.2192427"}, {"id": "6f5486c415a6aae48fbbc546358993d551ac1cd4", "title": "The time course of individual face recognition: A pattern analysis of ERP signals", "year": 2016, "pdf": "https://doi.org/10.1016/j.neuroimage.2016.03.006"}, {"id": "07c676e6c42df0f39966aa057a6dea67ed371264", "title": "Remembering beauty: Roles of orbitofrontal and hippocampal regions in successful memory encoding of attractive faces", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/07c6/76e6c42df0f39966aa057a6dea67ed371264.pdf"}, {"id": "938d363a87fa4020fe1e526c439f6f52e66c33c9", "title": "Formulating Face Verification With Semidefinite Programming", "year": 2007, "pdf": "https://doi.org/10.1109/TIP.2007.906271"}, {"id": "516eaf1b5d511008a3e0f401d84c5a7069200c67", 
"title": "Connection strategy and performance in sparsely connected 2D associative memory models with non-random images", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/71ec/b6d2f23cdb1ad6df86a4aa4d09d86bef7450.pdf"}, {"id": "31abc53cfaeb5181765917dba03c85a9e8de3c26", "title": "The Racially Diverse Affective Expression (RADIATE) Face Stimulus Set", "year": "2018", "pdf": "https://www.sciencedirect.com/science/article/pii/S0165178117321893"}, {"id": "c54fd2ea5fe45c1377a45c396fd68d3bc00a699e", "title": "Adaptive face identification for small-scale social dynamic environment", "year": 2014, "pdf": "https://doi.org/10.1109/MMAR.2014.6957427"}, {"id": "0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf", "title": "F Acial E Xpression R Ecognition Based on Wapa and Oepa F Ast Ica", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0cb7/e4c2f6355c73bfc8e6d5cdfad26f3fde0baf.pdf"}, {"id": "6e82ce9897093ce4f5fa795887273992489c380d", "title": "Face recognition using Eigensurface on Kinect depth-maps", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6e82/ce9897093ce4f5fa795887273992489c380d.pdf"}, {"id": "270567401251cad629f6d569febe95fe446a895c", "title": "A Pose Invariant Face Recognition system using Subspace Techniques", "year": 2005, "pdf": "http://pdfs.semanticscholar.org/2705/67401251cad629f6d569febe95fe446a895c.pdf"}, {"id": "5e8b9b0c2862a64fb6ed9841192a877b5ef0146d", "title": "Recognition memory for faces and scenes in amnesia: dissociable roles of medial temporal lobe structures.", "year": 2007, "pdf": "http://pdfs.semanticscholar.org/5e8b/9b0c2862a64fb6ed9841192a877b5ef0146d.pdf"}, {"id": "cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8", "title": "Pose and Expression Independent Facial Landmark Localization Using Dense-SURF and the Hausdorff Distance", "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.87"}]}
\ No newline at end of file
diff --git a/site/datasets/unknown/helen.json b/site/datasets/unknown/helen.json
new file mode 100644
index 00000000..3e1e22ac
--- /dev/null
+++ b/site/datasets/unknown/helen.json
@@ -0,0 +1 @@
+{"id": "95f12d27c3b4914e0668a268360948bce92f7db3", "paper": {"paper_id": "95f12d27c3b4914e0668a268360948bce92f7db3", "key": "helen", "title": "Interactive Facial Feature Localization", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf", "address": {"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}, "name": "Helen"}, "citations": [{"id": "7c6686fa4d8c990e931f1d16deabf647bf3b1986", "title": "Input/Output Deep Architecture for Structured Output Problems", "year": "2015", "pdf": "http://arxiv.org/abs/1504.07550"}, {"id": "a40f8881a36bc01f3ae356b3e57eac84e989eef0", "title": "End-to-end semantic face segmentation with conditional random fields as convolutional, recurrent and adversarial networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a40f/8881a36bc01f3ae356b3e57eac84e989eef0.pdf"}, {"id": "c2be82ed0db509087b08423c8cf39ab3c36549c3", "title": "Pixel-level guided face editing with fully convolution networks", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019363"}, {"id": "ebedc841a2c1b3a9ab7357de833101648281ff0e", "title": "Facial landmarking for in-the-wild images with local inference based on global appearance", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ebed/c841a2c1b3a9ab7357de833101648281ff0e.pdf"}, {"id": "b3f18013079e0535dcda045ac5145c201287aec3", "title": "Multi-Label Dilated Recurrent Network for Sequential Face Alignment", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8486438"}, {"id": "bf2f2696fdb4077b5ab18aa583f6376acadf2438", "title": "Supervised descent method based on appearance and shape for face alignment", "year": 2016, "pdf": null}, {"id": "1a8ccc23ed73db64748e31c61c69fe23c48a2bb1", "title": "Extensive Facial Landmark Localization with Coarse-to-Fine Convolutional Network Cascade", "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Zhou_Extensive_Facial_Landmark_2013_ICCV_paper.pdf"}, {"id": "9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb", "title": "High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.11585.pdf"}, {"id": "22c06284a908d8ad0994ad52119773a034eed7ee", "title": "Adaptive Visual Feedback Generation for Facial Expression Improvement with Multi-task Deep Neural Networks", "year": 2016, "pdf": "http://doi.acm.org/10.1145/2964284.2967236"}, {"id": "5b5b9c6c67855ede21a60c834aea5379df7d51b7", "title": "Advances in compositional fitting of active appearance models", "year": "2016", "pdf": "http://hdl.handle.net/10044/1/45280"}, {"id": "9b9ccd4954cf9dd605d49e9c3504224d06725ab7", "title": "DriveAHead — A Large-Scale Driver Head Pose Dataset", "year": 2017, "pdf": null}, {"id": "eb48a58b873295d719827e746d51b110f5716d6c", "title": "Face Alignment Using K-Cluster Regression Forests With Weighted Splitting", "year": "2016", "pdf": "https://arxiv.org/pdf/1706.01820.pdf"}, {"id": "b48d3694a8342b6efc18c9c9124c62406e6bf3b3", "title": "Recurrent Convolutional Shape Regression", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8305545"}, {"id": "a66d89357ada66d98d242c124e1e8d96ac9b37a0", "title": "Failure
Detection for Facial Landmark Detectors", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/a66d/89357ada66d98d242c124e1e8d96ac9b37a0.pdf"}, {"id": "91883dabc11245e393786d85941fb99a6248c1fb", "title": "Face alignment in-the-wild: A Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9188/3dabc11245e393786d85941fb99a6248c1fb.pdf"}, {"id": "266cb58a82ce44ce8f0cffbfa89e44227096f424", "title": "Assessment for facial nerve paralysis based on facial asymmetry", "year": "2017", "pdf": "http://doi.org/10.1007/s13246-017-0597-4"}, {"id": "397085122a5cade71ef6c19f657c609f0a4f7473", "title": "Using Segmentation to Predict the Absence of Occluded Parts", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/db11/4901d09a07ab66bffa6986bc81303e133ae1.pdf"}, {"id": "49258cc3979103681848284470056956b77caf80", "title": "EPAT: Euclidean Perturbation Analysis and Transform - An Agnostic Data Adaptation Framework for Improving Facial Landmark Detectors", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961745"}, {"id": "61f04606528ecf4a42b49e8ac2add2e9f92c0def", "title": "Deep Deformation Network for Object Landmark Localization", "year": "2016", "pdf": "https://arxiv.org/pdf/1605.01014.pdf"}, {"id": "1966055f13a7475100d18843f85717f312511805", "title": "CG Benefited Driver Facial Landmark Localization Across Large Rotation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8500537"}, {"id": "2fda461869f84a9298a0e93ef280f79b9fb76f94", "title": "OpenFace: An open source facial behavior analysis toolkit", "year": 2016, "pdf": "https://www.cl.cam.ac.uk/research/rainbow/projects/openface/wacv2016.pdf"}, {"id": "36219a3196aac2bd149bc786f083957a6e6da125", "title": "Recognition of the gaze direction: Anchoring with the eyebrows", "year": 2016, "pdf": "https://doi.org/10.1016/j.jvcir.2015.12.003"}, {"id": "465faf9974a60da00950be977f3bc2fc3e56f5d2", "title": "Facial action unit intensity estimation and feature relevance visualization with random regression forests", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273631"}, {"id": "2df4d05119fe3fbf1f8112b3ad901c33728b498a", "title": "Multi-task Learning for Structured Output Prediction", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf"}, {"id": "9ca7899338129f4ba6744f801e722d53a44e4622", "title": "Deep neural networks regularization for structured output prediction", "year": "2018", "pdf": "https://arxiv.org/pdf/1504.07550.pdf"}, {"id": "9993f1a7cfb5b0078f339b9a6bfa341da76a3168", "title": "A Simple, Fast and Highly-Accurate Algorithm to Recover 3D Shape from 2D Landmarks on a Single Image", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/9993/f1a7cfb5b0078f339b9a6bfa341da76a3168.pdf"}, {"id": "64cac22210861d4e9afb00b781da90cf99f9d19c", "title": "Facial Landmark Detection for Manga Images", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.03214.pdf"}, {"id": "056ba488898a1a1b32daec7a45e0d550e0c51ae4", "title": "Cascaded Continuous Regression for Real-Time Incremental Face Tracking", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/056b/a488898a1a1b32daec7a45e0d550e0c51ae4.pdf"}, {"id": "ef4b5bcaad4c36d7baa7bc166bd1712634c7ad71", "title": "Towards Spatio-temporal Face Alignment in Unconstrained Conditions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef4b/5bcaad4c36d7baa7bc166bd1712634c7ad71.pdf"}, {"id": "d03265ea9200a993af857b473c6bf12a095ca178", "title": "Multiple deep convolutional 
neural networks averaging for face alignment", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d032/65ea9200a993af857b473c6bf12a095ca178.pdf"}, {"id": "4e3b71b1aa6b6cb7aa55843d2214441f0076fe69", "title": "Colour-based lips segmentation method using artificial neural networks", "year": 2015, "pdf": null}, {"id": "4068574b8678a117d9a434360e9c12fe6232dae0", "title": "Automatic Construction of Deformable Models In-the-Wild", "year": 2014, "pdf": "http://www.visionmeetscognition.org/fpic2014/Camera_Ready/Paper%2031.pdf"}, {"id": "4a8480d58c30dc484bda08969e754cd13a64faa1", "title": "Offline Deformable Face Tracking in Arbitrary Videos", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406475"}, {"id": "37381718559f767fc496cc34ceb98ff18bc7d3e1", "title": "Harnessing Synthesized Abstraction Images to Improve Facial Attribute Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3738/1718559f767fc496cc34ceb98ff18bc7d3e1.pdf"}, {"id": "5239001571bc64de3e61be0be8985860f08d7e7e", "title": "Deep Appearance Models: A Deep Boltzmann Machine Approach for Face Modeling", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5239/001571bc64de3e61be0be8985860f08d7e7e.pdf"}, {"id": "bc704680b5032eadf78c4e49f548ba14040965bf", "title": "Face Normals \"In-the-Wild\" Using Fully Convolutional Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ccbc/c676546a43cd4b714f0c85cbd493f9c61396.pdf"}, {"id": "1672becb287ae3eaece3e216ba37677ed045db55", "title": "Fully automatic face normalization and single sample face recognition in unconstrained environments", "year": 2016, "pdf": "https://doi.org/10.1016/j.eswa.2015.10.047"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "f1a58bb78149f408471ce166a13cd9176e5edc5b", "title": "Facial Landmark Extraction Scheme Based on Semantic Segmentation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8472730"}, {"id": "9ef2b2db11ed117521424c275c3ce1b5c696b9b3", "title": "Robust Face Alignment Using a Mixture of Invariant Experts", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c31b/dd00734807938dcfd8a12375bd9ffa556985.pdf"}, {"id": "500b92578e4deff98ce20e6017124e6d2053b451", "title": "Incremental Face Alignment in the Wild", "year": 2014, "pdf": "http://eprints.eemcs.utwente.nl/25818/01/Pantic_Incremental_Face_Alignment_in_the_Wild.pdf"}, {"id": "4ac3cd8b6c50f7a26f27eefc64855134932b39be", "title": "Robust Facial Landmark Detection via a Fully-Convolutional Local-Global Context Network", "year": "", "pdf": "https://pdfs.semanticscholar.org/4ac3/cd8b6c50f7a26f27eefc64855134932b39be.pdf"}, {"id": "12d8730da5aab242795bdff17b30b6e0bac82998", "title": "Persistent Evidence of Local Image Properties in Generic ConvNets", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/12d8/730da5aab242795bdff17b30b6e0bac82998.pdf"}, {"id": "47109343e502a4097cb7efee54bc5fbb14598c05", "title": "Improved Strategies for HPE Employing Learning-by-Synthesis Approaches", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.182"}, {"id": "293ade202109c7f23637589a637bdaed06dc37c9", "title": "Material for : Adaptive Cascaded Regression", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/293a/de202109c7f23637589a637bdaed06dc37c9.pdf"}, {"id": "2f0b8579829b3d4efdbc03c96821e33d7cc65e1d", "title": "Using a Deformation Field Model for Localizing Faces and 
Facial Points under Weak Supervision", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.472"}, {"id": "bab2f4949a38a712a78aafbc0a3c392227c65f56", "title": "Eye detection using gradient histogram matching for cornea localization in refractive eye surgery", "year": 2017, "pdf": "https://doi.org/10.1109/CISP-BMEI.2017.8302191"}, {"id": "a856449c724f958dbb2f0629228d26a322153ba3", "title": "Face Mask Extraction in Video Sequence", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09207.pdf"}, {"id": "5f5906168235613c81ad2129e2431a0e5ef2b6e4", "title": "A Unified Framework for Compositional Fitting of Active Appearance Models", "year": 2016, "pdf": "https://arxiv.org/pdf/1601.00199v1.pdf"}, {"id": "a0fd85b3400c7b3e11122f44dc5870ae2de9009a", "title": "Learning Deep Representation for Face Alignment with Auxiliary Attributes", "year": "2016", "pdf": "https://arxiv.org/pdf/1408.3967.pdf"}, {"id": "1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb", "title": "Face Alignment by Explicit Shape Regression", "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248015"}, {"id": "1f9ae272bb4151817866511bd970bffb22981a49", "title": "An Iterative Regression Approach for Face Pose Estimation from RGB Images", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/1f9a/e272bb4151817866511bd970bffb22981a49.pdf"}, {"id": "c5ea084531212284ce3f1ca86a6209f0001de9d1", "title": "Audio-visual speech processing for multimedia localisation", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/c5ea/084531212284ce3f1ca86a6209f0001de9d1.pdf"}, {"id": "daa4cfde41d37b2ab497458e331556d13dd14d0b", "title": "Multi-view Constrained Local Models for Large Head Angle Facial Tracking", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406477"}, {"id": "837e99301e00c2244023a8a48ff98d7b521c93ac", "title": "Local Feature Evaluation for a Constrained Local Model Framework", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/b7b7/4e0ec15c22e1c94406c592bbb83c8e865f52.pdf"}, {"id": "d9deafd9d9e60657a7f34df5f494edff546c4fb8", "title": "Learning the Multilinear Structure of Visual Data", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100124"}, {"id": "9207671d9e2b668c065e06d9f58f597601039e5e", "title": "Face Detection Using a 3D Model on Face Keypoints", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/9207/671d9e2b668c065e06d9f58f597601039e5e.pdf"}, {"id": "5c124b57699be19cd4eb4e1da285b4a8c84fc80d", "title": "Unified Face Analysis by Iterative Multi-output Random Forests", "year": 2014, "pdf": "http://www.iis.ee.ic.ac.uk/icvl/doc/cvpr14_xiaowei.pdf"}, {"id": "6d8c9a1759e7204eacb4eeb06567ad0ef4229f93", "title": "Face Alignment Robust to Pose, Expressions and Occlusions", "year": "2016", "pdf": "https://arxiv.org/pdf/1707.05938.pdf"}, {"id": "38192a0f9261d9727b119e294a65f2e25f72d7e6", "title": "Facial feature point detection: A comprehensive survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3819/2a0f9261d9727b119e294a65f2e25f72d7e6.pdf"}, {"id": "c41a3c31972cf0c1be6b6895f3bf97181773fcfb", "title": "Accurate Facial Landmarks Detection for Frontal Faces with Extended Tree-Structured Models", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.103"}, {"id": "4f77a37753c03886ca9c9349723ec3bbfe4ee967", "title": "Localizing Facial Keypoints with Global Descriptor Search, Neighbour Alignment and Locally Linear Models", "year": 2013, "pdf": 
"http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Hasan_Localizing_Facial_Keypoints_2013_ICCV_paper.pdf"}, {"id": "830e5b1043227fe189b3f93619ef4c58868758a7", "title": "A survey on face detection in the wild: Past, present and future", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/830e/5b1043227fe189b3f93619ef4c58868758a7.pdf"}, {"id": "b07582d1a59a9c6f029d0d8328414c7bef64dca0", "title": "Employing Fusion of Learned and Handcrafted Features for Unconstrained Ear Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b075/82d1a59a9c6f029d0d8328414c7bef64dca0.pdf"}, {"id": "3504907a2e3c81d78e9dfe71c93ac145b1318f9c", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": 2017, "pdf": "https://arxiv.org/pdf/1605.02686v3.pdf"}, {"id": "31e57fa83ac60c03d884774d2b515813493977b9", "title": "Face alignment with cascaded semi-parametric deep greedy neural forests", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/31e5/7fa83ac60c03d884774d2b515813493977b9.pdf"}, {"id": "303065c44cf847849d04da16b8b1d9a120cef73a", "title": "3D Face Morphable Models \"In-the-Wild\"", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3030/65c44cf847849d04da16b8b1d9a120cef73a.pdf"}, {"id": "07d95be4922670ef2f8b11997e0c00eb643f3fca", "title": "The First Facial Landmark Tracking in-the-Wild Challenge: Benchmark and Results", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.132"}, {"id": "0561bed18b6278434deae562d646e8adad72e75d", "title": "Low rank driven robust facial landmark regression", "year": 2015, "pdf": "https://doi.org/10.1016/j.neucom.2014.09.052"}, {"id": "766728bac030b169fcbc2fbafe24c6e22a58ef3c", "title": "A survey of deep facial landmark detection", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/7667/28bac030b169fcbc2fbafe24c6e22a58ef3c.pdf"}, {"id": "ca537e1726a8d8c371a71bbd6d9098774ab51955", "title": "3D Reconstruction of \u201cIn-the-Wild\u201d Faces in Images and Videos", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359431"}, {"id": "45e7ddd5248977ba8ec61be111db912a4387d62f", "title": "Adversarial Learning of Structure-Aware Fully Convolutional Networks for Landmark Localization", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.00253.pdf"}, {"id": "47e8db3d9adb79a87c8c02b88f432f911eb45dc5", "title": "MAGMA: Multilevel Accelerated Gradient Mirror Descent Algorithm for Large-Scale Convex Composite Minimization", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5f99/63990ab7dd888ab33393f712f8d5c1463348.pdf"}, {"id": "413160257096b9efcd26d8de0d1fa53133b57a3d", "title": "Customer satisfaction measuring based on the most significant facial emotion", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4131/60257096b9efcd26d8de0d1fa53133b57a3d.pdf"}, {"id": "029b53f32079063047097fa59cfc788b2b550c4b", "title": "Continuous Conditional Neural Fields for Structured Regression", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b71c/73fcae520f6a5cdbce18c813633fb3d66342.pdf"}, {"id": "8a336e9a4c42384d4c505c53fb8628a040f2468e", "title": "Detecting Visually Observable Disease Symptoms from Faces", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/8a33/6e9a4c42384d4c505c53fb8628a040f2468e.pdf"}, {"id": "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1029-3"}, {"id": 
"88e2efab01e883e037a416c63a03075d66625c26", "title": "Convolutional Experts Constrained Local Model for 3D Facial Landmark Detection", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265507"}, {"id": "f423d8be5e13d9ef979debd3baf0a1b2e1d3682f", "title": "Approaching human level facial landmark localization by deep learning", "year": 2016, "pdf": "https://doi.org/10.1016/j.imavis.2015.11.004"}, {"id": "0a6d344112b5af7d1abbd712f83c0d70105211d0", "title": "Constrained Local Neural Fields for Robust Facial Landmark Detection in the Wild", "year": 2013, "pdf": "http://www.cl.cam.ac.uk/~tb346/pub/papers/iccv2013.pdf"}, {"id": "f61829274cfe64b94361e54351f01a0376cd1253", "title": "Regressing a 3D Face Shape from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410784"}, {"id": "708f4787bec9d7563f4bb8b33834de445147133b", "title": "Wavelet-SRNet: A Wavelet-Based CNN for Multi-scale Face Super Resolution", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237449"}, {"id": "b730908bc1f80b711c031f3ea459e4de09a3d324", "title": "Active Orientation Models for Face Alignment In-the-Wild", "year": 2014, "pdf": "http://ibug.doc.ic.ac.uk/media/uploads/documents/tifs_aoms.pdf"}, {"id": "29c5a44e01d1126505471b2ab46163d598c871c7", "title": "Improving Landmark Localization with Semi-Supervised Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.01591.pdf"}, {"id": "f0ae807627f81acb63eb5837c75a1e895a92c376", "title": "Facial Landmark Detection using Ensemble of Cascaded Regressions", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf"}, {"id": "bd13f50b8997d0733169ceba39b6eb1bda3eb1aa", "title": "Occlusion Coherence: Detecting and Localizing Occluded Faces", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/bd13/f50b8997d0733169ceba39b6eb1bda3eb1aa.pdf"}, {"id": "86c053c162c08bc3fe093cc10398b9e64367a100", "title": "Cascade of forests for face alignment", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/86c0/53c162c08bc3fe093cc10398b9e64367a100.pdf"}, {"id": "6966d9d30fa9b7c01523425726ab417fd8428790", "title": "Exemplar-Based Face Parsing", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619291"}, {"id": "f20ed84abcb1223f351a576ef10dfda9f277326b", "title": "Face recognition based on LBPH and regression of Local Binary features", "year": 2016, "pdf": null}, {"id": "9b8f7a6850d991586b7186f0bb7e424924a9fd74", "title": "Disentangling the Modes of Variation in Unlabelled Data", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8214214"}, {"id": "33ae696546eed070717192d393f75a1583cd8e2c", "title": "Subspace selection to suppress confounding source domain information in AAM transfer learning", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272730"}, {"id": "2c14c3bb46275da5706c466f9f51f4424ffda914", "title": "L2, 1-based regression and prediction accumulation across views for robust facial landmark detection", "year": "2016", "pdf": "http://doi.org/10.1016/j.imavis.2015.09.003"}, {"id": "ef52f1e2b52fd84a7e22226ed67132c6ce47b829", "title": "Online Eye Status Detection in the Wild with Convolutional Neural Networks", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/ef52/f1e2b52fd84a7e22226ed67132c6ce47b829.pdf"}, {"id": "94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81", "title": "Real-time generic face tracking in the wild with CUDA", "year": 2014, "pdf": 
"http://doi.acm.org/10.1145/2557642.2579369"}, {"id": "3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e", "title": "Learning Local Responses of Facial Landmarks with Conditional Variational Auto-Encoder for Face Alignment", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.117"}, {"id": "7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4", "title": "Deep and Deformable: Convolutional Mixtures of Deformable Part-Based Models", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373833"}, {"id": "1c1f957d85b59d23163583c421755869f248ceef", "title": "Robust Facial Landmark Detection Under Significant Head Poses and Occlusion", "year": 2015, "pdf": "http://homepages.rpi.edu/~wuy9/ICCV15/FLD_iccv15.pdf"}, {"id": "5da98f7590c08e83889f3cec7b0304b3610abf42", "title": "Face alignment using a deep neural network with local feature learning and recurrent regression", "year": 2017, "pdf": "https://doi.org/10.1016/j.eswa.2017.07.018"}, {"id": "5fa6e4a23da0b39e4b35ac73a15d55cee8608736", "title": "RED-Net: A Recurrent Encoder\u2013Decoder Network for Video-Based Face Alignment", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.06066.pdf"}, {"id": "5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4", "title": "Pose-indexed based multi-view method for face alignment", "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532567"}, {"id": "2e3d081c8f0e10f138314c4d2c11064a981c1327", "title": "A Comprehensive Performance Evaluation of Deformable Face Tracking \u201cIn-the-Wild\u201d", "year": 2017, "pdf": "http://arxiv.org/pdf/1603.06015v1.pdf"}, {"id": "88d63a0cc0b8a5303bdef286d6df118bb1d44d26", "title": "Real-time mimicking of estonian speaker's mouth movements on a 3D avatar using Kinect 2", "year": 2015, "pdf": null}, {"id": "f354cd137fdc40a3ff6a4004f2a052966c275627", "title": "Very Fast Semantic Image Segmentation Using Hierarchical Dilation and Feature Refining", "year": "2017", "pdf": "http://doi.org/10.1007/s12559-017-9530-0"}, {"id": "17c0d99171efc957b88c31a465c59485ab033234", "title": "To learn image super-resolution, use a GAN to learn how to do image degradation first", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11458.pdf"}, {"id": "2f7aa942313b1eb12ebfab791af71d0a3830b24c", "title": "Feature-Based Lucas–Kanade and Active Appearance Models", "year": 2015, "pdf": null}, {"id": "e4754afaa15b1b53e70743880484b8d0736990ff", "title": "300 Faces In-The-Wild Challenge: database and results", "year": "2016", "pdf": "http://doi.org/10.1016/j.imavis.2016.01.002"}, {"id": "788a7b59ea72e23ef4f86dc9abb4450efefeca41", "title": "Robust Statistical Face Frontalization", "year": 2015, "pdf": "http://eprints.eemcs.utwente.nl/26840/01/Pantic_Robust_Statistical_Face_Frontalization.pdf"}, {"id": "3c086601ce0bac61047b5b931b253bd4035e1e7a", "title": "Occlusion handling in feature point tracking using ranked parts based models", "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7350897"}, {"id": "4f742c09ce12859b20deaa372c8f1575acfc99c9", "title": "How do you smile? 
Towards a comprehensive smile analysis system", "year": 2017, "pdf": "https://doi.org/10.1016/j.neucom.2017.01.020"}, {"id": "5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725", "title": "Merging Pose Estimates Across Space and Time", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf"}, {"id": "5180c98815d7034e753a14ef6f54583f115da3aa", "title": "Challenging 3D Head Tracking and Evaluation Using Unconstrained Test Data Set", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/iV.2017.40"}, {"id": "e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5", "title": "Improving Facial Landmark Detection via a Super-Resolution Inception Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf"}, {"id": "0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a", "title": "Neural Networks Regularization Through Representation Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.05292.pdf"}, {"id": "95e3b78eb4d5b469f66648ed4f37e45e0e01e63e", "title": "Facial point localization via neural networks in a cascade regression framework", "year": "2016", "pdf": "http://doi.org/10.1007/s11042-016-4261-x"}, {"id": "4a4f0a47de1567f3f913e2632921797df36b2525", "title": "Synthetic Prior Design for Real-Time Face Tracking", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7785140"}, {"id": "530243b61fa5aea19b454b7dbcac9f463ed0460e", "title": "ReenactGAN: Learning to Reenact Faces via Boundary Transfer", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11079.pdf"}, {"id": "31af1f2614823504d1d643d1b019c6f9d2150b15", "title": "Super-FAN: Integrated facial landmark localization and super-resolution of real-world low resolution faces in arbitrary poses with GANs", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.02765.pdf"}, {"id": "3352426a67eabe3516812cb66a77aeb8b4df4d1b", "title": "Joint Multi-view Face Alignment in the Wild", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.06023.pdf"}, {"id": "e52272f92fa553687f1ac068605f1de929efafc2", "title": "Using a Probabilistic Neural Network for lip-based biometric verification", "year": 2017, "pdf": "https://doi.org/10.1016/j.engappai.2017.06.003"}, {"id": "ca83053d9a790319b11a04eac5ab412e7fcab914", "title": "Efficient generic face model fitting to images and videos", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ca83/053d9a790319b11a04eac5ab412e7fcab914.pdf"}, {"id": "1824b1ccace464ba275ccc86619feaa89018c0ad", "title": "One millisecond face alignment with an ensemble of regression trees", "year": 2014, "pdf": "http://www.csc.kth.se/~vahidk/papers/KazemiCVPR14.pdf"}, {"id": "72a1852c78b5e95a57efa21c92bdc54219975d8f", "title": "Cascaded regression with sparsified feature covariance matrix for facial landmark detection", "year": "2016", "pdf": "http://doi.org/10.1016/j.patrec.2015.11.014"}, {"id": "234c106036964131c0f2daf76c47ced802652046", "title": "Adaptive facial point detection and emotion recognition for a humanoid robot", "year": "2015", "pdf": "http://doi.org/10.1016/j.cviu.2015.07.007"}, {"id": "64e82b42e1c41250bdf9eb952686631287cfd410", "title": "Evaluating the Quality of Face Alignment without Ground Truth", "year": 2015, "pdf": "https://doi.org/10.1111/cgf.12760"}, {"id": "2724ba85ec4a66de18da33925e537f3902f21249", "title": "Robust Face Landmark Estimation under Occlusion", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298"}, {"id": "30f6c4bd29b9a8c94f37f3818cf6145c1507826f", "title": "The 
research of facial features localization based on posterior probability deformable model", "year": 2015, "pdf": null}, {"id": "390f3d7cdf1ce127ecca65afa2e24c563e9db93b", "title": "Learning Deep Representation for Face Alignment with Auxiliary Attributes", "year": 2016, "pdf": "https://arxiv.org/pdf/1408.3967v2.pdf"}, {"id": "e97ba85a4550667b8a28f83a98808d489e0ff3bc", "title": "A Research on Fast Face Feature Points Detection on Smart Mobile Devices", "year": "2018", "pdf": "http://doi.org/10.1155/2018%2F9729014"}, {"id": "f070d739fb812d38571ec77490ccd8777e95ce7a", "title": "Hierarchical facial landmark localization via cascaded random binary patterns", "year": "2015", "pdf": "http://doi.org/10.1016/j.patcog.2014.09.007"}, {"id": "0b0958493e43ca9c131315bcfb9a171d52ecbb8a", "title": "A Unified Neural Based Model for Structured Output Problems", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/0b09/58493e43ca9c131315bcfb9a171d52ecbb8a.pdf"}, {"id": "0e8760fc198a7e7c9f4193478c0e0700950a86cd", "title": "Brute-Force Facial Landmark Analysis With A 140, 000-Way Classifier", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0e87/60fc198a7e7c9f4193478c0e0700950a86cd.pdf"}, {"id": "dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43", "title": "Factorized Variational Autoencoders for Modeling Audience Reactions to Movies", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100120"}, {"id": "f633d6dc02b2e55eb24b89f2b8c6df94a2de86dd", "title": "Face alignment by robust discriminative Hough voting", "year": "2016", "pdf": "http://doi.org/10.1016/j.patcog.2016.05.017"}, {"id": "56fd4c05869e11e4935d48aa1d7abb96072ac242", "title": "OpenFace 2.0: Facial Behavior Analysis Toolkit", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812"}, {"id": "ec1e03ec72186224b93b2611ff873656ed4d2f74", "title": "D Reconstruction of \u201c Inthe-Wild \u201d Faces in Images and Videos", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ec1e/03ec72186224b93b2611ff873656ed4d2f74.pdf"}, {"id": "6ba6045e4b404c44f9b4dfce2d946019f0e85a72", "title": "Facial landmark detection based on an ensemble of local weighted regressors during real driving situation", "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899962"}, {"id": "ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2", "title": "Audio Visual Recognition of Spontaneous Emotions In-the-Wild", "year": 2016, "pdf": "https://doi.org/10.1007/978-981-10-3005-5_57"}, {"id": "2fb8d7601fc3ad637781127620104aaab5122acd", "title": "Estimating Correspondences of Deformable Objects “In-the-Wild”", "year": 2016, "pdf": null}, {"id": "6e38011e38a1c893b90a48e8f8eae0e22d2008e8", "title": "A Computer Vision Based Approach for Understanding Emotional Involvements in Children with Autism Spectrum Disorders", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265376"}, {"id": "8875dcf2836315839741fd6944f249263408c27f", "title": "Facial landmark detection via self-adaption model and multi-task feature learning", "year": 2016, "pdf": null}, {"id": "656a59954de3c9fcf82ffcef926af6ade2f3fdb5", "title": "Convolutional Network Representation for Visual Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/656a/59954de3c9fcf82ffcef926af6ade2f3fdb5.pdf"}, {"id": "891b10c4b3b92ca30c9b93170ec9abd71f6099c4", "title": "2 New Statement for Structured Output Regression Problems", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf"}, {"id": 
"dee406a7aaa0f4c9d64b7550e633d81bc66ff451", "title": "Content-Adaptive Sketch Portrait Generation by Decompositional Representation Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1710.01453.pdf"}, {"id": "60824ee635777b4ee30fcc2485ef1e103b8e7af9", "title": "Cascaded Collaborative Regression for Robust Facial Landmark Detection Trained Using a Mixture of Synthetic and Real Images With Dynamic Weighting", "year": 2015, "pdf": "http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/Feng-TIP-2015.pdf"}, {"id": "6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb", "title": "Low Resolution Face Recognition Using a Two-Branch Deep Convolutional Neural Network Architecture", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.06247.pdf"}, {"id": "4848a48a2b8bacd2092e87961cd86818da8e7151", "title": "Comparative evaluation of facial fiducial point detection approaches", "year": 2017, "pdf": "https://doi.org/10.1109/VCIP.2017.8305080"}, {"id": "50ccc98d9ce06160cdf92aaf470b8f4edbd8b899", "title": "Towards robust cascaded regression for face alignment in the wild", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Qu_Towards_Robust_Cascaded_2015_CVPR_paper.pdf"}, {"id": "8f772d9ce324b2ef5857d6e0b2a420bc93961196", "title": "Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01760.pdf"}, {"id": "44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb", "title": "Facial landmark localization by enhanced convolutional neural network", "year": 2018, "pdf": "https://doi.org/10.1016/j.neucom.2017.07.052"}, {"id": "9ab963e473829739475b9e47514f454ab467a5af", "title": "A Fully End-to-End Cascaded CNN for Facial Landmark Detection", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.33"}, {"id": "faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b", "title": "Combining Data-driven and Model-driven Methods for Robust Facial Landmark Detection", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/faea/d8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b.pdf"}, {"id": "f7dea4454c2de0b96ab5cf95008ce7144292e52a", "title": "Facial Landmark Detection: A Literature Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.05563.pdf"}, {"id": "d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e", "title": "Recognizing and Segmenting Objects in the Presence of Occlusion and Clutter", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d850/aff9d10a01ad5f1d8a1b489fbb3998d0d80e.pdf"}, {"id": "131e395c94999c55c53afead65d81be61cd349a4", "title": "A Functional Regression approach to Facial Landmark Tracking", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2c3f/aeaf0fe103e1e6cb8c2116728e2a5c7b7f29.pdf"}, {"id": "191d30e7e7360d565b0c1e2814b5bcbd86a11d41", "title": "Discriminative Deep Face Shape Model for Facial Point Detection", "year": 2014, "pdf": "http://homepages.rpi.edu/~wuy9/DiscriminativeDeepFaceShape/DiscriminativeDeepFaceShape_IJCV.pdf"}, {"id": "df80fed59ffdf751a20af317f265848fe6bfb9c9", "title": "Learning Deep Sharable and Structural Detectors for Face Alignment", "year": 2017, "pdf": "http://ivg.au.tsinghua.edu.cn/paper/2017_Learning%20deep%20sharable%20and%20structural%20detectors%20for%20face%20alignment.pdf"}, {"id": "375435fb0da220a65ac9e82275a880e1b9f0a557", "title": "From Pixels to Response Maps: Discriminative Image Filtering for Face Alignment in the Wild", "year": 2015, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/tpami_alignment.pdf"}, {"id": 
"c3d3d2229500c555c7a7150a8b126ef874cbee1c", "title": "Shape Augmented Regression Method for Face Alignment", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406478"}, {"id": "cad2bd940e7580490da9cc739e597d029e166504", "title": "Salient-points-guided face alignment", "year": 2017, "pdf": null}, {"id": "68f0d2b41b1eb4ea6b5e841c64f48b58b21253b0", "title": "Driver Facial Landmark Detection in Real Driving Situations", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8094282"}, {"id": "1b0a071450c419138432c033f722027ec88846ea", "title": "Looking at faces in a vehicle: A deep CNN based approach and evaluation", "year": 2016, "pdf": "https://doi.org/10.1109/ITSC.2016.7795622"}, {"id": "af11769a427eb8daa8435b1ea3252531b4275db8", "title": "A Hybrid Approach for Face Alignment 1", "year": "2017", "pdf": null}]}
\ No newline at end of file
diff --git a/site/datasets/unknown/ijb_c.json b/site/datasets/unknown/ijb_c.json
new file mode 100644
index 00000000..09f06a9e
--- /dev/null
+++ b/site/datasets/unknown/ijb_c.json
@@ -0,0 +1 @@
+{"id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "paper": {"paper_id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "key": "ijb_c", "title": "IARPA Janus Benchmark-B Face Dataset", "year": 2017, "pdf": "http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf", "address": "", "name": "IJB-B"}, "citations": [{"id": "57178b36c21fd7f4529ac6748614bb3374714e91", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "d1a43737ca8be02d65684cf64ab2331f66947207", "title": "IJB \u2013 S : IARPA Janus Surveillance Video Benchmark \u2217", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "ac5ab8f71edde6d1a2129da12d051ed03a8446a1", "title": "Comparator Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11440.pdf"}, {"id": "7ce03597b703a3b6754d1adac5fbc98536994e8f", "title": "On the Intrinsic Dimensionality of Face Representation", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7ce0/3597b703a3b6754d1adac5fbc98536994e8f.pdf"}, {"id": "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "title": "A Light CNN for Deep Face Representation with Noisy Labels", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf"}, {"id": "1ffe20eb32dbc4fa85ac7844178937bba97f4bf0", "title": "Face Clustering: Representation and Pairwise Constraints", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.05067.pdf"}, {"id": "7323b594d3a8508f809e276aa2d224c4e7ec5a80", "title": "An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05508.pdf"}, {"id": "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "title": "Pairwise Relational Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04976.pdf"}, {"id": "47e14fdc6685f0b3800f709c32e005068dfc8d47", "title": "Secure Face Matching Using Fully Homomorphic Encryption", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00577.pdf"}, {"id": "4f623e3821d14553b3b286e20910db9225fb723f", "title": "Audio-Visual Person Recognition in Multimedia Data From the Iarpa Janus Program", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122"}, {"id": "0b82bf595e76898993ed4f4b2883c42720c0f277", "title": "Improving Face Recognition by Exploring Local Features with Visual Attention", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229"}, {"id": "a2e0966f303f38b58b898d388d1c83e40b605262", "title": "ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125"}, {"id": "a357bc79b1ac6f2474ff6b9f001419745a8bc21c", "title": "Toward More Realistic Face Recognition Evaluation Protocols for the
YouTube Faces Database", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a357/bc79b1ac6f2474ff6b9f001419745a8bc21c.pdf"}, {"id": "d44a93027208816b9e871101693b05adab576d89", "title": "On the Capacity of Face Representation", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.10433.pdf"}, {"id": "15ebec3796a2e23d31c8c8ddf6d21555be6eadc6", "title": "Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.03193.pdf"}, {"id": "069bb452e015ef53f0ef30e9690e460ccc73cf03", "title": "Multicolumn Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09192.pdf"}, {"id": "c220f457ad0b28886f8b3ef41f012dd0236cd91a", "title": "Crystal Loss and Quality Pooling for Unconstrained Face Verification and Recognition", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/c220/f457ad0b28886f8b3ef41f012dd0236cd91a.pdf"}, {"id": "fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb", "title": "3D-Aided Dual-Agent GANs for Unconstrained Face Recognition.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/30040629"}, {"id": "4e32fbb58154e878dd2fd4b06398f85636fd0cf4", "title": "A Hierarchical Matcher using Local Classifier Chains", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.02339.pdf"}, {"id": "9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682", "title": "To Frontalize or Not to Frontalize: Do We Really Need Elaborate Pre-processing to Improve Face Recognition?", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113"}, {"id": "f3b84a03985de3890b400b68e2a92c0a00afd9d0", "title": "Large Variability Surveillance Camera Face Database", "year": 2015, "pdf": null}, {"id": "011e6146995d5d63c852bd776f782cc6f6e11b7b", "title": "Fast Training of Triplet-Based Deep Binary Embedding Networks", "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhuang_Fast_Training_of_CVPR_2016_paper.pdf"}, {"id": "5fea59ccdab484873081eaa37af88e26e3db2aed", "title": "Capacitive Sensor for Tagless Remote Human Identification Using Body Frequency Absorption Signatures", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8263394"}, {"id": "9cc8cf0c7d7fa7607659921b6ff657e17e135ecc", "title": "Detecting Masked Faces in the Wild with LLE-CNNs", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099536"}, {"id": "18858cc936947fc96b5c06bbe3c6c2faa5614540", "title": "Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf"}, {"id": "1e8eee51fd3bf7a9570d6ee6aa9a09454254689d", "title": "Face Search at Scale", "year": 2017, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangOttoJain_FaceSearchAtScale_TPAMI.pdf"}, {"id": "31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a", "title": "Patch-based Face Recognition using a Hierarchical Multi-label Matcher", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/31aa/20911cc7a2b556e7d273f0bdd5a2f0671e0a.pdf"}, {"id": "59fc69b3bc4759eef1347161e1248e886702f8f7", "title": "Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf"}, {"id": "6909cd34a1eceba2140e2c02a842cefcecf33645", "title": "Face Recognition of the Rhinopithecus Roxellana Qinlingensis Based on Improved HOG and Sparse Representation", 
"year": 2017, "pdf": null}, {"id": "77362789d04db4c51be61eaffa4f43e03759e677", "title": "Fuzzy Analysis and Deep Convolution Neural Networks in Still-to-video Recognition", "year": 2018, "pdf": null}, {"id": "56fd4c05869e11e4935d48aa1d7abb96072ac242", "title": "OpenFace 2.0: Facial Behavior Analysis Toolkit", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812"}, {"id": "7e8c8b1d72c67e2e241184448715a8d4bd88a727", "title": "Face Verification Based on Relational Disparity Features and Partial Least Squares Models", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8097314"}, {"id": "99ced8f36d66dce20d121f3a29f52d8b27a1da6c", "title": "Organizing Multimedia Data in Video Surveillance Systems Based on Face Verification with Convolutional Neural Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/99ce/d8f36d66dce20d121f3a29f52d8b27a1da6c.pdf"}, {"id": "0077cd8f97cafd2b389783858a6e4ab7887b0b6b", "title": "Face Image Reconstruction from Deep Templates", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b971/266b29fcecf1d5efe1c4dcdc2355cb188ab0.pdf"}, {"id": "cb30c1370885033bc833bc7ef90a25ee0900c461", "title": "FaceOff: Anonymizing Videos in the Operating Rooms", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04440.pdf"}, {"id": "b503793943a17d2f569685cd17e86b5b4fffe3fd", "title": "How popular CNNs perform in real applications of face recognition", "year": 2016, "pdf": null}, {"id": "a8117a4733cce9148c35fb6888962f665ae65b1e", "title": "A Good Practice Towards Top Performance of Face Recognition: Transferred Deep Feature Fusion", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a811/7a4733cce9148c35fb6888962f665ae65b1e.pdf"}, {"id": "aed321909bb87c81121c841b21d31509d6c78f69", "title": "Unfamiliar Sides , Video , Image Enhancement in Face Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/aed3/21909bb87c81121c841b21d31509d6c78f69.pdf"}, {"id": "3504907a2e3c81d78e9dfe71c93ac145b1318f9c", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": 2017, "pdf": "https://arxiv.org/pdf/1605.02686v3.pdf"}, {"id": "af4759f5e636b5d9049010d5f0e2b0df2a69cd72", "title": "Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240509"}, {"id": "2c052a1c77a3ec2604b3deb702d77c41418c7d3e", "title": "What Is the Challenge for Deep Learning in Unconstrained Face Recognition?", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373863"}, {"id": "84c5b45328dee855c4855a104ac9c0558cc8a328", "title": "Conformal Mapping of a 3D Face Representation onto a 2D Image for CNN Based Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411213"}, {"id": "ddf099f0e0631da4a6396a17829160301796151c", "title": "Learning Face Image Quality from Human Assessments", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/ddf0/99f0e0631da4a6396a17829160301796151c.pdf"}, {"id": "44078d0daed8b13114cffb15b368acc467f96351", "title": "Triplet probabilistic embedding for face verification and clustering", "year": 2016, "pdf": "http://arxiv.org/pdf/1604.05417v1.pdf"}, {"id": "77037a22c9b8169930d74d2ce6f50f1a999c1221", "title": "Robust Face Recognition With Kernelized Locality-Sensitive Group Sparsity Representation", "year": 2017, "pdf": 
"https://ueaeprints.uea.ac.uk/64308/1/Accepted_manuscript.pdf"}, {"id": "e465f596d73f3d2523dbf8334d29eb93a35f6da0", "title": "On Face Segmentation, Face Swapping, and Face Perception", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e465/f596d73f3d2523dbf8334d29eb93a35f6da0.pdf"}, {"id": "04bb3fa0824d255b01e9db4946ead9f856cc0b59", "title": "Maximum A Posteriori Estimation of Distances Between Deep Features in Still-to-Video Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c1de/db5ac05c955e53d7ef1f6367fb7badea49b1.pdf"}, {"id": "1ef4aac0ebc34e76123f848c256840d89ff728d0", "title": "Rapid Synthesis of Massive Face Sets for Improved Face Recognition", "year": 2017, "pdf": "http://www.openu.ac.il/home/hassner/projects/augmented_faces/Masietal2017rapid.pdf"}, {"id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "title": "Labeled Faces in the Wild: A Survey", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf"}, {"id": "bb4f83458976755e9310b241a689c8d21b481238", "title": "Improving Face Verification and Person Re-Identification Accuracy Using Hyperplane Similarity", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265393"}, {"id": "2e3d081c8f0e10f138314c4d2c11064a981c1327", "title": "A Comprehensive Performance Evaluation of Deformable Face Tracking \u201cIn-the-Wild\u201d", "year": 2017, "pdf": "http://arxiv.org/pdf/1603.06015v1.pdf"}, {"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "b7bde76f52b3a8a20a05d2a01dec1d1c2a16e609", "title": "CrowdFaceDB: Database and benchmarking for face verification in crowd", "year": "2018", "pdf": "http://doi.org/10.1016/j.patrec.2017.12.028"}, {"id": "e4232e8fd566a7289ccb33f732c9093c9beb84a6", "title": "UHDB31: A Dataset for Better Understanding Face Recognition Across Pose and Illumination Variation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265511"}, {"id": "2d1f86e2c7ba81392c8914edbc079ac64d29b666", "title": "Deep Heterogeneous Feature Fusion for Template-Based Face Recognition", "year": 2017, "pdf": "https://doi.org/10.1109/WACV.2017.71"}, {"id": "03f7041515d8a6dcb9170763d4f6debd50202c2b", "title": "Clustering Millions of Faces by Identity", "year": 2018, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf"}, {"id": "6afe1f668eea8dfdd43f0780634073ed4545af23", "title": "Deep learning for content-based video retrieval in film and television production", "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-4962-9"}, {"id": "be72b20247fb4dc4072d962ced77ed89aa40372f", "title": "Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07718.pdf"}, {"id": "14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74", "title": "Robust watch-list screening using dynamic ensembles of SVMs based on multiple face representations", "year": "2016", "pdf": "http://doi.org/10.1007/s00138-016-0820-4"}, {"id": "3933e323653ff27e68c3458d245b47e3e37f52fd", 
"title": "Evaluation of a 3 D-aided Pose Invariant 2 D Face Recognition System", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/3933/e323653ff27e68c3458d245b47e3e37f52fd.pdf"}, {"id": "7f6cd03e3b7b63fca7170e317b3bb072ec9889e0", "title": "A Face Recognition Signature Combining Patch-based Features with Soft Facial Attributes", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7f6c/d03e3b7b63fca7170e317b3bb072ec9889e0.pdf"}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/28cd/46a078e8fad370b1aba34762a874374513a5.pdf"}, {"id": "da2b2be4c33e221c7f417875a6c5c74043b1b227", "title": "Score normalization in stratified biometric systems", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272712"}, {"id": "8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa", "title": "Dataset Augmentation for Pose and Lighting Invariant Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8c7f/4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa.pdf"}, {"id": "2c92839418a64728438c351a42f6dc5ad0c6e686", "title": "Pose-Aware Face Recognition in the Wild", "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf"}, {"id": "b2ae5c496fe01bb2e2dee107f75b82c6a2a23374", "title": "Attention-Based Template Adaptation for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.116"}, {"id": "b7ec41005ce4384e76e3be854ecccd564d2f89fb", "title": "Granular Computing and Sequential Analysis of Deep Embeddings in Fast Still-to-Video Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009"}, {"id": "6f22628d34a486d73c6b46eb071200a00e3abae3", "title": "Learning Pose-Aware Models for Pose-Invariant Face Recognition in the Wild.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994497"}, {"id": "48499deeaa1e31ac22c901d115b8b9867f89f952", "title": "Interim Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "6ed27a41214716259676b6949999cdf4b12d0bdd", "title": "A Sparse and Inlier Based SIFT and SURF Features for Automated Face Recognition", "year": 2016, "pdf": null}, {"id": "b908487b30002d5ae1ebd819880a713494a45a40", "title": "Developmental Network: An Internal Emergent Object Feature Learning", "year": "2017", "pdf": "http://doi.org/10.1007/s11063-017-9734-z"}, {"id": "8da32ff9e3759dc236878ac240728b344555e4e9", "title": "Investigating Nuisance Factors in Face Recognition with DCNN Representation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014820"}, {"id": "832a9584e85af1675d49ee35fd13283b21ce3a3f", "title": "Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00112.pdf"}, {"id": "b971266b29fcecf1d5efe1c4dcdc2355cb188ab0", "title": "On the Reconstruction of Face Images from Deep Face Templates.", "year": "2018", "pdf": "https://arxiv.org/pdf/1703.00832.pdf"}, {"id": "d92084e376a795d3943df577d3b3f3b7d12eeae5", "title": "Face and Image 
Representation in Deep CNN Features", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.85"}, {"id": "dc13229afbbc8b7a31ed5adfe265d971850c0976", "title": "Learning from Millions of 3 D Scans for Large-scale 3 D Face Recognition", "year": "2017", "pdf": null}, {"id": "df51dfe55912d30fc2f792561e9e0c2b43179089", "title": "Face Hallucination Using Linear Models of Coupled Sparse Support", "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2717181"}, {"id": "dfaa547451aae219cd2ca7a761e6c16c1e1d0add", "title": "Representation Learning by Rotating Your Faces", "year": "2018", "pdf": "https://arxiv.org/pdf/1705.11136.pdf"}, {"id": "2f7e9b45255c9029d2ae97bbb004d6072e70fa79", "title": "cvpaper.challenge in 2015 - A review of CVPR2015 and DeepSurvey", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2f7e/9b45255c9029d2ae97bbb004d6072e70fa79.pdf"}, {"id": "ec39e9c21d6e2576f21936b1ecc1574dadaf291e", "title": "Pose-Robust Face Verification by Exploiting Competing Tasks", "year": 2017, "pdf": "https://doi.org/10.1109/WACV.2017.130"}, {"id": "0db8e6eb861ed9a70305c1839eaef34f2c85bbaf", "title": "Towards Large-Pose Face Frontalization in the Wild", "year": 2017, "pdf": "https://arxiv.org/pdf/1704.06244v1.pdf"}, {"id": "e79bacc03152ea55343e6af97bcd17d8904cf5ef", "title": "Recursive Spatial Transformer (ReST) for Alignment-Free Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237669"}, {"id": "b5ca8d4f259f35c1f3edfd9f108ce29881e478b0", "title": "Disentangled Representation Learning GAN for Pose-Invariant Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099624"}, {"id": "4ac3cd8b6c50f7a26f27eefc64855134932b39be", "title": "Robust Facial Landmark Detection via a Fully-Convolutional Local-Global Context Network", "year": "", "pdf": "https://pdfs.semanticscholar.org/4ac3/cd8b6c50f7a26f27eefc64855134932b39be.pdf"}, {"id": "a3f69a073dcfb6da8038607a9f14eb28b5dab2db", "title": "3D-Aided Deep Pose-Invariant Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a3f6/9a073dcfb6da8038607a9f14eb28b5dab2db.pdf"}, {"id": "f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7", "title": "NosePose: a competitive, landmark-free methodology for head pose estimation in the wild", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f3d9/e347eadcf0d21cb0e92710bc906b22f2b3e7.pdf"}, {"id": "1fc249ec69b3e23856b42a4e591c59ac60d77118", "title": "Evaluation of a 3D-aided pose invariant 2D face recognition system", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272729"}, {"id": "75249ebb85b74e8932496272f38af274fbcfd696", "title": "Face Identification in Large Galleries", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/7524/9ebb85b74e8932496272f38af274fbcfd696.pdf"}, {"id": "4d90d7834ae25ee6176c096d5d6608555766c0b1", "title": "Face and Body Association for Video-Based Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115"}, {"id": "e4c3d5d43cb62ac5b57d74d55925bdf76205e306", "title": "Average Biased ReLU Based CNN Descriptor for Improved Face Retrieval", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e4c3/d5d43cb62ac5b57d74d55925bdf76205e306.pdf"}, {"id": "ef230e3df720abf2983ba6b347c9d46283e4b690", "title": "QUIS-CAMPI: an annotated multi-biometrics data feed from surveillance scenarios", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef23/0e3df720abf2983ba6b347c9d46283e4b690.pdf"}, {"id": 
"5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd", "title": "Supervised COSMOS Autoencoder: Learning Beyond the Euclidean Loss!", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06221.pdf"}, {"id": "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "title": "Deep Learning for Biometrics : A Survey KALAIVANI SUNDARARAJAN", "year": "2018", "pdf": null}, {"id": "2ad7cef781f98fd66101fa4a78e012369d064830", "title": "Neural Aggregation Network for Video Face Recognition", "year": 2017, "pdf": "http://arxiv.org/pdf/1603.05474v1.pdf"}, {"id": "cfd4004054399f3a5f536df71f9b9987f060f434", "title": "Person Recognition in Social Media Photos", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.03224.pdf"}, {"id": "727d03100d4a8e12620acd7b1d1972bbee54f0e6", "title": "von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04264.pdf"}, {"id": "39ed31ced75e6151dde41944a47b4bdf324f922b", "title": "Pose-Guided Photorealistic Face Rotation", "year": "", "pdf": "https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf"}, {"id": "f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a", "title": "LOTS about attacking deep features", "year": "2017", "pdf": "https://arxiv.org/pdf/1611.06179.pdf"}, {"id": "282a3ee79a08486f0619caf0ada210f5c3572367", "title": "Accelerated Training for Massive Classification via Dynamic Class Selection", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf"}, {"id": "49258cc3979103681848284470056956b77caf80", "title": "EPAT: Euclidean Perturbation Analysis and Transform - An Agnostic Data Adaptation Framework for Improving Facial Landmark Detectors", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961745"}, {"id": "0cb2dd5f178e3a297a0c33068961018659d0f443", "title": "IARPA Janus Benchmark-B Face Dataset", "year": 2017, "pdf": "http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf"}, {"id": "5905b4610389cd3b11a3a1ce06c05fee36a97f86", "title": "Unconstrained Face Recognition Using a Set-to-Set Distance Measure on Deep Learned Features", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936556"}, {"id": "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1029-3"}, {"id": "054738ce39920975b8dcc97e01b3b6cc0d0bdf32", "title": "Towards the design of an end-to-end automated system for image and video-based recognition", "year": 2016, "pdf": "https://doi.org/10.1109/ITA.2016.7888183"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "098363b29eef1471c494382338687f2fe98f6e15", "title": "Metadata-Based Feature Aggregation Network for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411212"}, {"id": "e11bc0f7c73c04d38b7fb80bd1ca886495a4d43c", "title": "\u201cA Leopard Cannot Change Its Spots\u201d: Improving Face Recognition Using 3D-Based Caricatures", "year": "2019", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306"}, {"id": "6005a30bf103164fe3410185976b6b8b36537aca", "title": "Communication, Networks and Computing", "year": "2018", "pdf": "http://doi.org/10.1007/978-981-13-2372-0"}, {"id": 
"e988be047b28ba3b2f1e4cdba3e8c94026139fcf", "title": "Multi-Task Convolutional Neural Network for Pose-Invariant Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1702.04710.pdf"}, {"id": "d6bdc70d259b38bbeb3a78db064232b4b4acc88f", "title": "Video-Based Face Association and Identification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.27"}, {"id": "6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf", "title": "Multi-Region bilinear convolutional neural networks for person re-identification", "year": 2017, "pdf": "http://arxiv.org/pdf/1512.05300v3.pdf"}, {"id": "fab60b3db164327be8588bce6ce5e45d5b882db6", "title": "Maximum A Posteriori Estimation of Distances Between Deep Features in Still-to-Video Face Recognition", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/fab6/0b3db164327be8588bce6ce5e45d5b882db6.pdf"}, {"id": "91d513af1f667f64c9afc55ea1f45b0be7ba08d4", "title": "Automatic Face Image Quality Prediction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/91d5/13af1f667f64c9afc55ea1f45b0be7ba08d4.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/images_of_groups.json b/site/datasets/unknown/images_of_groups.json new file mode 100644 index 00000000..a132c909 --- /dev/null +++ b/site/datasets/unknown/images_of_groups.json @@ -0,0 +1 @@ +{"id": "21d9d0deed16f0ad62a4865e9acf0686f4f15492", "paper": {"paper_id": "21d9d0deed16f0ad62a4865e9acf0686f4f15492", "key": "images_of_groups", "title": "Understanding images of groups of people", "year": 2009, "pdf": "http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf", "address": "", "name": "Images of Groups"}, "citations": [{"id": "e92c934c047d0ec23e7ed3a749e14a0150dc1bc8", "title": "Privacy-Preserving Photo Sharing based on a Public Key Infrastructure", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/e92c/934c047d0ec23e7ed3a749e14a0150dc1bc8.pdf"}, {"id": "0313924b600ebb8f608705d96c06b133b3b9627a", "title": "Deciphering the Crowd: Modeling and Identification of Pedestrian Group Motion", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/0313/924b600ebb8f608705d96c06b133b3b9627a.pdf"}, {"id": "e7b6887cd06d0c1aa4902335f7893d7640aef823", "title": "Modelling of Facial Aging and Kinship: A Survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf"}, {"id": "3bb5dc57a828cebd484750a51d9cdcbaa51d8f96", "title": "A collaborative approach for face verification and attributes refinement", "year": 2014, "pdf": "https://doi.org/10.1016/j.ins.2014.03.045"}, {"id": "8df57cc014ef7079e32bd7928dfde7e78430789a", "title": "FASTA-ELM: A fast adaptive shrinkage/thresholding algorithm for extreme learning machine and its application to gender recognition", "year": "2017", "pdf": "http://doi.org/10.1016/j.neucom.2016.09.046"}, {"id": "20b8a76e988e796f0f225876a69842f6839e4c98", "title": "Real-time Gender Recognition for Uncontrolled Environment of Real-life Images", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/20b8/a76e988e796f0f225876a69842f6839e4c98.pdf"}, {"id": "0a85bdff552615643dd74646ac881862a7c7072d", "title": "Beyond frontal faces: Improving Person Recognition using multiple cues", "year": 2015, "pdf": "https://doi.org/10.1109/CVPR.2015.7299113"}, {"id": "25885e9292957feb89dcb4a30e77218ffe7b9868", "title": "Analyzing the Affect of a Group of People Using Multi-modal Framework", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2588/5e9292957feb89dcb4a30e77218ffe7b9868.pdf"}, {"id": "705375260ffd0ed261fb6a85d57126f7537bef1d", "title": "A deep look into group happiness prediction from images", "year": 2016, "pdf": "http://doi.acm.org/10.1145/2993148.2997628"}, {"id": "63a4105adbe182e67d8fd324de5c84a6df444294", "title": "Gender classification by LPQ features from intensity and Monogenic images", "year": 2017, "pdf": null}, {"id": "05d082daa3e6f86adeb78b26e8cf07b94b418377", "title": "Photo Selection for Family Album using Deep Neural Networks", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3209697"}, {"id": "930a6ea926d1f39dc6a0d90799d18d7995110862", "title": "Privacy-preserving photo sharing based on a secure JPEG", "year": 2015, "pdf": "https://infoscience.epfl.ch/record/205859/files/BigSecurity_2015_ProShare.pdf?version=1"}, {"id": "50ff21e595e0ebe51ae808a2da3b7940549f4035", "title": "Age Group and Gender Estimation in the Wild With Deep RoR Architecture", "year": 2017, "pdf": "http://export.arxiv.org/pdf/1710.02985"}, {"id": "f1280f76933ba8b7f4a6b8662580504f02bb4ab6", "title": "Gender Classification by Deep Learning on Millions of Weakly Labelled Images", 
"year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703"}, {"id": "eba4cfd76f99159ccc0a65cab0a02db42b548d85", "title": "Spoken Attributes: Mixing Binary and Relative Attributes to Say the Right Thing", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751379"}, {"id": "633c851ebf625ad7abdda2324e9de093cf623141", "title": "Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727"}, {"id": "0965a62c9c354d2c7175e313ade9e38120f1bd4e", "title": "Efficient Face Detection Method using Modified Hausdorff Distance Method with C 4 . 5 Classifier and Canny Edge Detection", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/8f73/bd18521af9e731de5c0a26de0b3f16887b15.pdf"}, {"id": "13d3b696cdbec99b2fdbb34e9d031bf8c683f112", "title": "Family Photo Recognition via Multiple Instance Learning", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3078971.3079036"}, {"id": "1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9", "title": "The Hidden Sides of Names—Face Modeling with First Name Attributes", "year": 2014, "pdf": "https://web.stanford.edu/~bgirod/pdfs/ChenHuizhongTransPAMISep2014.pdf"}, {"id": "37b6d6577541ed991435eaf899a2f82fdd72c790", "title": "Vision-based Human Gender Recognition: A Survey", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/37b6/d6577541ed991435eaf899a2f82fdd72c790.pdf"}, {"id": "c030de7fc30b0fbfbfc7719f6b22a66589435dfc", "title": "Social Photo Tagging Recommendation Using Community-Based Group Associations", "year": 2012, "pdf": "https://doi.org/10.1109/ICMEW.2012.46"}, {"id": "c9c2de3628be7e249722b12911bebad84b567ce6", "title": "Age and gender recognition in the wild with deep attention", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.06.028"}, {"id": "df310591dfba9672252d693bc87da73c246749c9", "title": "Fusion of Holistic and Part Based Features for Gender Classification in the Wild", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/df31/0591dfba9672252d693bc87da73c246749c9.pdf"}, {"id": "69a55c30c085ad1b72dd2789b3f699b2f4d3169f", "title": "Automatic Happiness Strength Analysis of a Group of People using Facial Expressions", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/69a5/5c30c085ad1b72dd2789b3f699b2f4d3169f.pdf"}, {"id": "48a42303559ea518ba06f54a8cfce4226bb0e77e", "title": "Urban tribes: Analyzing group photos from a social perspective", "year": 2012, "pdf": "http://vision.ucsd.edu/sites/default/files/urbantribes.pdf"}, {"id": "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "title": "Cross-Database Evaluation of Normalized Raw Pixels for Gender Recognition under Unconstrained Settings", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.542"}, {"id": "ff81f2b4ff19f043b61b5f720643448711ebdb6d", "title": "Generic to Specific Recognition Models for Membership Analysis in Group Videos", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961784"}, {"id": "d84568d42a02b6d365889451f208f423edb1f0f3", "title": "Age Synthesis and Estimation From Face Image Ms", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/d845/68d42a02b6d365889451f208f423edb1f0f3.pdf"}, {"id": "eef725f4130ee326954e84e5f4ddf487da63c94e", "title": "Towards public events image content understanding", "year": 2010, "pdf": null}, {"id": "9636c7d3643fc598dacb83d71f199f1d2cc34415", "title": "Automatic facial attribute analysis via adaptive sparse 
representation of random patches", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9636/c7d3643fc598dacb83d71f199f1d2cc34415.pdf"}, {"id": "19e32a17c7091dc88cab9e858110deb7769b3f5c", "title": "Using the Surrounding WEB Content of Pictures to Generate Candidates for Photo Annotation", "year": 2013, "pdf": null}, {"id": "3d97f739ae76c8db1146da4aaeb0dc1ef3d31c33", "title": "Donn\u00e9es multimodales pour l \u2019 analyse d \u2019 image", "year": "2011", "pdf": "https://pdfs.semanticscholar.org/c2b3/d8ac1f02e63809c74d2eacb37329ec139ce2.pdf"}, {"id": "0a68747d001aba014acd3b6ec83ba9534946a0da", "title": "Automatic Group Happiness Intensity Analysis", "year": 2015, "pdf": "http://staff.estem-uc.edu.au/roland/files/2009/05/Dhall_Goecke_Gedeon_TAC2015_AutomaticGroupHappinessIntensityAnalysis.pdf"}, {"id": "20aa8348cf4847b9f72fe8ddbca8a2594ea23856", "title": "Learning ordinal discriminative features for age estimation", "year": 2012, "pdf": "https://pdfs.semanticscholar.org/9d7b/19164bad2048cfc49bd018932b389eebd32b.pdf"}, {"id": "166186e551b75c9b5adcc9218f0727b73f5de899", "title": "Automatic Age and Gender Recognition in Human Face Image Dataset using Convolutional Neural Network System", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf"}, {"id": "1e1a67a78badc619b2f9938e4a03922dcbee0fb6", "title": "Food/Non-food Image Classification and Food Categorization using Pre-Trained GoogLeNet Model", "year": 2016, "pdf": "http://dl.acm.org/citation.cfm?id=2986039"}, {"id": "0d0041aefb16c5f7b1e593b440bb3df7b05b411c", "title": "Secure JPEG scrambling enabling privacy in photo sharing", "year": 2015, "pdf": "http://translectures.videolectures.net/site/normal_dl/tag=980911/fgconference2015_ebrahimi_photo_sharing_01.pdf"}, {"id": "da24f3e196c5345ce08dfcc835574035da197f48", "title": "A Global Alignment Kernel based Approach for Group-level Happiness Intensity Estimation", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.03313.pdf"}, {"id": "9892fca93b59750c89ca123a21805b928dd56047", "title": "3D Visual Proxemics: Recognizing Human Interactions in 3D from a Single Image", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619281"}, {"id": "aca273a9350b10b6e2ef84f0e3a327255207d0f5", "title": "On soft biometrics", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/efb2/4d35d8f6a46e1ff3800a2481bc7e681e255e.pdf"}, {"id": "74f21f2edfa985280be63f8a01aa00541f3a5625", "title": "People Groping by Spatio-Temporal Features of Trajectories", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/769a/80886fd7ccf65d0a1d51387250ccc0a8a2f3.pdf"}, {"id": "f1748303cc02424704b3a35595610890229567f9", "title": "Learning-based encoding with soft assignment for age estimation under unconstrained imaging conditions", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f174/8303cc02424704b3a35595610890229567f9.pdf"}, {"id": "3edc43e336be075dca77c7e173b555b6c14274d8", "title": "Travelmedia: An intelligent management system for media captured in travel", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/3edc/43e336be075dca77c7e173b555b6c14274d8.pdf"}, {"id": "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "title": "Age and Gender Estimation of Unfiltered Faces", "year": 2014, "pdf": "http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf"}, {"id": "24e6e8d725f08c44dac42a588f41092e392a3514", "title": "Automatic age estimation based on deep learning algorithm", "year": 2016, "pdf": 
"https://doi.org/10.1016/j.neucom.2015.09.115"}, {"id": "974b32c4b74cc6a15f44cf8b5874d6f20273b21f", "title": "Leveraging geometry and appearance cues for recognizing family photos", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163147"}, {"id": "8b8728edc536020bc4871dc66b26a191f6658f7c", "title": "Robust gender recognition by exploiting facial attributes dependencies", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/8b87/28edc536020bc4871dc66b26a191f6658f7c.pdf"}, {"id": "1937d32117b6ce27188408ec984f0bcf2375e6a7", "title": "Tag refinement in an image folksonomy using visual similarity and tag co-occurrence statistics", "year": 2010, "pdf": "https://doi.org/10.1016/j.image.2010.10.002"}, {"id": "9ff1a4754391a5cf91c998eeaf75b93a4f5f2451", "title": "Face-based multiple instance analysis for smart electronics billboard", "year": 2011, "pdf": "https://doi.org/10.1007/s11042-011-0746-9"}, {"id": "3c8da376576938160cbed956ece838682fa50e9f", "title": "Aiding face recognition with social context association rule based re-ranking", "year": 2014, "pdf": "https://doi.org/10.1109/BTAS.2014.6996266"}, {"id": "14c988aa9086207b337dcc5611aad08422129b42", "title": "Human Relative Position Detection Based on Mutual Occlusion", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/14c9/88aa9086207b337dcc5611aad08422129b42.pdf"}, {"id": "79f4bd6b3f0ebc4b81c13e7720c42285464a858b", "title": "Can discriminative cues aid face recognition across age?", "year": 2011, "pdf": "https://doi.org/10.1109/FG.2011.5771399"}, {"id": "9857eeded6b7608ff862174742b38946102f5008", "title": "Interpretable Facial Relational Network Using Relational Importance", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/9857/eeded6b7608ff862174742b38946102f5008.pdf"}, {"id": "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "title": "Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf"}, {"id": "035fb6317dd793a51c57ccce6f79c4aa1437ea93", "title": "Gender prediction from mobile ocular images: A feasibility study", "year": 2017, "pdf": null}, {"id": "58f94993d32c01b617adbb7a782cddb5b2dc461d", "title": "Estimating heights from photo collections: a data-driven approach", "year": 2014, "pdf": "http://doi.acm.org/10.1145/2660460.2660466"}, {"id": "132f88626f6760d769c95984212ed0915790b625", "title": "Exploring Entity Resolution for Multimedia Person Identification", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/132f/88626f6760d769c95984212ed0915790b625.pdf"}, {"id": "a44ba9b6fad29cd3cf3a72b868f42e32297221fc", "title": "A deep analysis on age estimation", "year": "2015", "pdf": "http://doi.org/10.1016/j.patrec.2015.06.006"}, {"id": "37eb666b7eb225ffdafc6f318639bea7f0ba9a24", "title": "Age, Gender and Race Estimation from Unconstrained Face Images", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/37eb/666b7eb225ffdafc6f318639bea7f0ba9a24.pdf"}, {"id": "151c342b33d9b8332c92185307f8d9dc9baa8047", "title": "Density-aware person detection and tracking in crowds", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6126526"}, {"id": "0dfe04d3fd96dee4952bac36bafd3778c106dc1d", "title": "A new deep-learning framework for group emotion recognition", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3136755.3143014"}, {"id": "ea69a469cf2828f7ec2a1df5d0fd4f669ce6ced1", "title": "MDID: A multiply distorted image database for image 
quality assessment", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2016.07.033"}, {"id": "3f9a7d690db82cf5c3940fbb06b827ced59ec01e", "title": "VIP: Finding important people in images", "year": "2015", "pdf": "https://arxiv.org/pdf/1502.05678.pdf"}, {"id": "8879083463a471898ff9ed9403b84db277be5bf6", "title": "Regression Facial Attribute Classification via simultaneous dictionary learning", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2016.08.031"}, {"id": "34698ba3bf6b093781fc1bb89f1d0ba7c93ede3b", "title": "Human Age Estimation Based on Locality and Ordinal Information", "year": 2015, "pdf": "https://doi.org/10.1109/TCYB.2014.2376517"}, {"id": "66af2afd4c598c2841dbfd1053bf0c386579234e", "title": "Context-assisted face clustering framework with human-in-the-loop", "year": 2014, "pdf": "http://www.ics.uci.edu/~dvk/pub/J17_IJMIR14_Liyan.pdf"}, {"id": "9d1cebed7672210f9c411c5ba422a931980da833", "title": "Relational Learning Based Happiness Intensity Analysis in a Group", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0078"}, {"id": "4f0e82a326e9ff158a5092ba6206877cf961e19c", "title": "Facial age estimation and gender classification using multi level local phase quantization", "year": 2015, "pdf": null}, {"id": "d1eab889815d3686e3c3e0f80246b058823f10e1", "title": "Robust gender recognition for uncontrolled environment of real-life images", "year": 2010, "pdf": "https://doi.org/10.1109/TCE.2010.5606301"}, {"id": "3b9d94752f8488106b2c007e11c193f35d941e92", "title": "Appearance, Visual and Social Ensembles for Face Recognition in Personal Photo Collections", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/3b9d/94752f8488106b2c007e11c193f35d941e92.pdf"}, {"id": "b22f5f0929704752a16d0f65f00a5161a059d8e3", "title": "On soft biometrics", "year": "2015", "pdf": "http://doi.org/10.1016/j.patrec.2015.08.006"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "1f8304f4b51033d2671147b33bb4e51b9a1e16fe", "title": "Beyond Trees: MAP Inference in MRFs via Outer-Planar Decomposition", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/1f83/04f4b51033d2671147b33bb4e51b9a1e16fe.pdf"}, {"id": "ffd152065390103497e29f00acc040567e1481b6", "title": "Group-level arousal and valence recognition in static images: Face, body and context", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284862"}, {"id": "8f89aed13cb3555b56fccd715753f9ea72f27f05", "title": "Attended End-to-end Architecture for Age Estimation from Facial Expression Videos", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8f89/aed13cb3555b56fccd715753f9ea72f27f05.pdf"}, {"id": "8a84fd21ffe7acfa207439d2e30ed15e491b1991", "title": "Bikers Are Like Tobacco Shops, Formal Dressers Are Like Suits: Recognizing Urban Tribes with Caffe", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7045975"}, {"id": "9939498315777b40bed9150d8940fc1ac340e8ba", "title": "ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583"}, {"id": "a01f9461bc8cf8fe40c26d223ab1abea5d8e2812", "title": "Facial Age Estimation Through the Fusion of Texture and Local Appearance Descriptors", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a01f/9461bc8cf8fe40c26d223ab1abea5d8e2812.pdf"}, {"id": 
"35e4b6c20756cd6388a3c0012b58acee14ffa604", "title": "Gender Classification in Large Databases", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf"}, {"id": "bff80048f4efd4fc2082a37b7c8f645cdd9cc0c8", "title": "Accelerating a Computer Vision Algorithm on a Mobile SoC Using CPU-GPU Co-processing - A Case Study on Face Detection", "year": 2016, "pdf": "http://doi.acm.org/10.1145/2897073.2897081"}, {"id": "2c8743089d9c7df04883405a31b5fbe494f175b4", "title": "Real-time full-body human gender recognition in (RGB)-D data", "year": 2015, "pdf": "http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf"}, {"id": "b2749caec0094e186d3ee850151c899b8508f47a", "title": "AVIUE — Artificial vision to improve the user experience", "year": 2013, "pdf": null}, {"id": "28126d165f73c2a18600a9b0440f5e80191d52d9", "title": "Clock-Modeled Ternary Spatial Relations for Visual Scene Analysis", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/3791/5016f5540b265866b9a63dbb5106874c26ea.pdf"}, {"id": "d6a9ea9b40a7377c91c705f4c7f206a669a9eea2", "title": "Visual Representations for Fine-grained Categorization", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d6a9/ea9b40a7377c91c705f4c7f206a669a9eea2.pdf"}, {"id": "ae753fd46a744725424690d22d0d00fb05e53350", "title": "Describing Clothing by Semantic Attributes", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ae75/3fd46a744725424690d22d0d00fb05e53350.pdf"}, {"id": "cf5384353e9285d92ecc59395cb1855168a22fcf", "title": "Learning people co-occurrence relations by using relevance feedback for retrieving group photos", "year": 2011, "pdf": "http://doi.acm.org/10.1145/1991996.1992053"}, {"id": "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "title": "Age and gender classification using convolutional neural networks", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf"}, {"id": "90ae02da16b750a9fd43f8a38440f848309c2fe0", "title": "A review of facial gender recognition", "year": 2015, "pdf": "https://doi.org/10.1007/s10044-015-0499-6"}, {"id": "be65ca1665e041a8d801759fec9a918ef7ff5c39", "title": "Spatial Face Context with Gender Information for Group Photo Similarity Assessment", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.462"}, {"id": "8e88a651e7bc9d200fcd7af0ca93690e0929f469", "title": "Efficient graph based spatial face context representation and matching", "year": 2013, "pdf": "https://doi.org/10.1109/ICASSP.2013.6638004"}, {"id": "890a4a3f7006a39b614d8836d67cc46a6a2a36c9", "title": "Multi-scale score level fusion of local descriptors for gender classification in the wild", "year": "2016", "pdf": "http://doi.org/10.1007/s11042-016-3653-2"}, {"id": "341ed69a6e5d7a89ff897c72c1456f50cfb23c96", "title": "DAGER: Deep Age, Gender and Emotion Recognition Using Convolutional Neural Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/cd7f/26c430363f90e530824446b3a4c85cfb94e5.pdf"}, {"id": "52887969107956d59e1218abb84a1f834a314578", "title": "Travel Recommendation by Mining People Attributes and Travel Group Types From Community-Contributed Photos", "year": 2013, "pdf": "http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/chen13travel.pdf"}, {"id": "c1c34a3ab7815af1b9bcaf2822e4b9da8505f915", "title": "Image transmorphing with JPEG", "year": 2015, "pdf": "https://infoscience.epfl.ch/record/210902/files/15_ICIP_Transmorph.pdf"}, {"id": "984edce0b961418d81203ec477b9bfa5a8197ba3", "title": "Customer and target individual face analysis 
for retail analytics", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369732"}, {"id": "dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb", "title": "Face Recognition and Facial Attribute Analysis from Unconstrained Visual Data", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/dc2e/805d0038f9d1b3d1bc79192f1d90f6091ecb.pdf"}, {"id": "767723f2e8ea0a39ca2b309c83a4c1d27c1e2c54", "title": "Gender-aided people recognition in photo collections", "year": 2013, "pdf": "https://doi.org/10.1109/MMSP.2013.6659260"}, {"id": "4f7821d28033be3757ad567e0c4e79a43e1b4a72", "title": "People re-identification across non-overlapping cameras using group features", "year": 2016, "pdf": "https://doi.org/10.1016/j.cviu.2015.06.011"}, {"id": "15f3d47b48a7bcbe877f596cb2cfa76e798c6452", "title": "Automatic face analysis tools for interactive digital games", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/15f3/d47b48a7bcbe877f596cb2cfa76e798c6452.pdf"}, {"id": "16c884be18016cc07aec0ef7e914622a1a9fb59d", "title": "Exploiting Multimodal Data for Image Understanding", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/16c8/84be18016cc07aec0ef7e914622a1a9fb59d.pdf"}, {"id": "fd30eb1d0cb417202c7cb499aca1d81eb932932b", "title": "Robust gender recognition for real-time surveillance system", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583879"}, {"id": "fd51665efe2520a55aa58b2f1863a3bd9870529f", "title": "Understanding Compressive Adversarial Privacy", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.08911.pdf"}, {"id": "97d1d561362a8b6beb0fdbee28f3862fb48f1380", "title": "Age Synthesis and Estimation via Faces: A Survey", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.36"}, {"id": "01d8c844848d3aae4da671f0131d050149e15b82", "title": "Human-Centered Social Media Analytics", "year": 2014, "pdf": "https://doi.org/10.1007/978-3-319-05491-9"}, {"id": "d252e10024a22c8274ae67dbf37aa854d75a85f2", "title": "Joint Gender Classification and Age Estimation by Nearly Orthogonalizing Their Semantic Spaces", "year": "2018", "pdf": "https://arxiv.org/pdf/1609.04116.pdf"}, {"id": "4166aa3fa97e9e835056133d2140c2f405e8a9ab", "title": "Who Blocks Who: Simultaneous Segmentation of Occluded Objects", "year": 2013, "pdf": "https://doi.org/10.1007/s11390-013-1385-6"}, {"id": "6aaf91619dc41d1e442c5439fbf006965758b915", "title": "Human age classification using facial skin analysis and multi-class support vector machine", "year": 2016, "pdf": null}, {"id": "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "title": "Age group classification in the wild with deep RoR architecture", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296549"}, {"id": "e29ba8930f379245035ff7d33b5693592ee491aa", "title": "Discriminative subgraphs for discovering family photos", "year": 2016, "pdf": "https://doi.org/10.1007/s41095-016-0054-4"}, {"id": "c55a6c98887b3079647d0edb4778d81bab6708f6", "title": "Self-Similarity Representation of Faces for Kin Relationships", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/c55a/6c98887b3079647d0edb4778d81bab6708f6.pdf"}, {"id": "2c5b5a5e4b8cd001e535118c2fa90bff95d51648", "title": "Combining Facial Dynamics With Appearance for Age Estimation", "year": 2015, "pdf": "https://ivi.fnwi.uva.nl/isis/publications/2015/DibekliogluTIP2015/DibekliogluTIP2015.pdf"}, {"id": "eb716dd3dbd0f04e6d89f1703b9975cad62ffb09", "title": " Visual Object Category Discovery in Images and Videos", "year": 2012, "pdf": 
"http://pdfs.semanticscholar.org/eb71/6dd3dbd0f04e6d89f1703b9975cad62ffb09.pdf"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "c9adf1ff0bad0e0397ae98d4e0857192bfc3d59e", "title": "Audio-visual gender recognition in uncontrolled environment using variability modeling techniques", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996271"}, {"id": "18de899c853120a1a2cd502ebc3e970b92e1882f", "title": "Age Regression from Soft Aligned Face Images Using Low Computational Resources", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/18de/899c853120a1a2cd502ebc3e970b92e1882f.pdf"}, {"id": "422ca72f0ce9d63d2fab1ecc7f3c77e5c0fbfbff", "title": "MEG: Texture operators for multi-expert gender classification", "year": 2017, "pdf": "https://doi.org/10.1016/j.cviu.2016.09.004"}, {"id": "08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7", "title": "Understanding Kin Relationships in a Photo", "year": 2012, "pdf": "http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf"}, {"id": "b81e765ee5acc1aaedb205e09318577e4f9c18c2", "title": "Learning local features for age estimation on real-life faces", "year": 2010, "pdf": null}, {"id": "95ace502ba23a8a5543b882937de23b892112cca", "title": "Facial Dynamics Interpreter Network: What Are the Important Relations Between Local Dynamics for Facial Trait Estimation?", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.10688.pdf"}, {"id": "9755554b13103df634f9b1ef50a147dd02eab02f", "title": "How Transferable Are CNN-Based Features for Age and Gender Classification?", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736925"}, {"id": "12e4545d07e1793df87520f384b37a015815d2f7", "title": "Age invariant face recognition: a survey on facial aging databases, techniques and effect of aging", "year": "2018", "pdf": "http://doi.org/10.1007/s10462-018-9661-z"}, {"id": "22f656d0f8426c84a33a267977f511f127bfd7f3", "title": "From Facial Expression Recognition to Interpersonal Relation Prediction", "year": 2017, "pdf": "http://arxiv.org/abs/1609.06426"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "d14badb33740b42833ccd620b344fd75c35df2b7", "title": "Group happiness assessment using geometric features and dataset balancing", "year": "2016", "pdf": "http://dl.acm.org/citation.cfm?id=2997633"}, {"id": "1135a818b756b057104e45d976546970ba84e612", "title": "Age, Gender, and Fine-Grained Ethnicity Prediction Using Convolutional Neural Networks for the East Asian Face Dataset", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.118"}, {"id": "81b4f1f6e8b993a04fb7e91842ad39691cb9e4c7", "title": "It's Not Polite to Point: Describing People with Uncertain Attributes", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619241"}, {"id": "83272537fc29345d32f68a8ab37e4170df10d3bb", "title": "Automatic clothes segmentation for soft biometrics", "year": 2014, "pdf": "https://acceda.ulpgc.es:8443/bitstream/10553/15756/5/C089_ICIP14_postprint.pdf"}, {"id": "e295c1aa47422eb35123053038e62e9aa50a2e3a", "title": "ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results", "year": "2015", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389"}, {"id": "5bd2ab5f5eff88d0489d949972e50875891bc0fd", "title": "Boosting gender recognition performance with a fuzzy inference system", "year": "2015", "pdf": "http://doi.org/10.1016/j.eswa.2014.11.023"}, {"id": "4a111ca4ba39386b489f9c0a9c7949e932563ddb", "title": "Automatic Group Affect Analysis in Images via Visual Attribute and Feature Networks", "year": "2018", "pdf": "http://doi.org/10.1109/ICIP.2018.8451242"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/imdb_wiki.json b/site/datasets/unknown/imdb_wiki.json new file mode 100644 index 00000000..2e4d313e --- /dev/null +++ b/site/datasets/unknown/imdb_wiki.json @@ -0,0 +1 @@ +{"id": "10195a163ab6348eef37213a46f60a3d87f289c5", "paper": {"paper_id": "10195a163ab6348eef37213a46f60a3d87f289c5", "key": "imdb_wiki", "title": "Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks", "year": 2016, "pdf": "https://doi.org/10.1007/s11263-016-0940-3", "address": "", "name": "IMDB"}, "citations": [{"id": "f46a526c423dd09a3f14f2c9a3838fb4f56fa730", "title": "Anchored Regression Networks Applied to Age Estimation and Super Resolution", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237444"}, {"id": "380dd0ddd5d69adc52defc095570d1c22952f5cc", "title": "Improving Smiling Detection with Race and Gender Diversity", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf"}, {"id": "ec0104286c96707f57df26b4f0a4f49b774c486b", "title": "An Ensemble CNN2ELM for Age Estimation", "year": 2018, "pdf": "http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf"}, {"id": "d444368421f456baf8c3cb089244e017f8d32c41", "title": "CNN for IMU assisted odometry estimation using velodyne LiDAR", "year": "2018", "pdf": "https://arxiv.org/pdf/1712.06352.pdf"}, {"id": "fffefc1fb840da63e17428fd5de6e79feb726894", "title": "Fine-Grained Age Estimation in the wild with Attention LSTM Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10445.pdf"}, {"id": "4522a7268facecf05769e90cae6555ac70c05cc8", "title": "Auxiliary Demographic Information Assisted Age Estimation With Cascaded Structure", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475"}, {"id": "c9c2de3628be7e249722b12911bebad84b567ce6", "title": "Age and gender recognition in the wild with deep attention", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.06.028"}, {"id": "305346d01298edeb5c6dc8b55679e8f60ba97efb", "title": "Fine-Grained Face Annotation Using Deep Multi-Task CNN", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3053/46d01298edeb5c6dc8b55679e8f60ba97efb.pdf"}, {"id": "9939498315777b40bed9150d8940fc1ac340e8ba", "title": "ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583"}, {"id": "ab3fcd9d5fbd2d0ad48fba4005899cf13e08d07e", "title": "Evaluating Automated Facial Age Estimation Techniques for Digital Forensics", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424644"}, {"id": "8397956c7ad3bd24c6c6c0b38866e165367327c0", "title": "Social Relation Trait Discovery from Visual LifeLog Data with Facial Multi-Attribute Framework", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/8397/956c7ad3bd24c6c6c0b38866e165367327c0.pdf"}, {"id": "30b15cdb72760f20f80e04157b57be9029d8a1ab", "title": "Face Aging with Identity-Preserved Conditional Generative Adversarial Networks", "year": "", "pdf": "https://pdfs.semanticscholar.org/30b1/5cdb72760f20f80e04157b57be9029d8a1ab.pdf"}, {"id": "8a991beca5bc864bbc1e26df953fd1fbd4dcb4bd", "title": "Applying artificial intelligence to assess the impact of orthognathic treatment on facial attractiveness and estimated age.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/30087062"}, {"id": 
"50ff21e595e0ebe51ae808a2da3b7940549f4035", "title": "Age Group and Gender Estimation in the Wild With Deep RoR Architecture", "year": 2017, "pdf": "http://export.arxiv.org/pdf/1710.02985"}, {"id": "8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b", "title": "Learning from Longitudinal Face Demonstration - Where Tractable Deep Modeling Meets Inverse Reinforcement Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10520.pdf"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "92b61b09d2eed4937058d0f9494d9efeddc39002", "title": "BoxCars: Improving Vehicle Fine-Grained Recognition using 3D Bounding Boxes in Traffic Surveillance", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/92b6/1b09d2eed4937058d0f9494d9efeddc39002.pdf"}, {"id": "2b632f090c09435d089ff76220fd31fd314838ae", "title": "Early Adaptation of Deep Priors in Age Prediction from Face Images", "year": 2017, "pdf": "http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf"}, {"id": "36939e6a365e9db904d81325212177c9e9e76c54", "title": "Assessing the Accuracy of Four Popular Face Recognition Tools for Inferring Gender, Age, and Race", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/941b/5492e6ac98355fd7bc7531f846d638e814ac.pdf"}, {"id": "d00787e215bd74d32d80a6c115c4789214da5edb", "title": "Faster and Lighter Online Sparse Dictionary Learning", "year": "", "pdf": "http://pdfs.semanticscholar.org/d007/87e215bd74d32d80a6c115c4789214da5edb.pdf"}, {"id": "b8e5800dfc590f82a0f7eedefce9abebf8088d12", "title": "How to Train Your Neural Network with Dictionary Learning", "year": 2017, "pdf": "https://doi.org/10.1109/DCC.2017.87"}, {"id": "775c15a5dfca426d53c634668e58dd5d3314ea89", "title": "Image Quality-aware Deep Networks Ensemble for Efficient Gender Recognition in the Wild", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/775c/15a5dfca426d53c634668e58dd5d3314ea89.pdf"}, {"id": "8f89aed13cb3555b56fccd715753f9ea72f27f05", "title": "Attended End-to-end Architecture for Age Estimation from Facial Expression Videos", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8f89/aed13cb3555b56fccd715753f9ea72f27f05.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "65d705bbcc10f42683503b3599327c816265d951", "title": "Articulated Motion and Deformable Objects", "year": "2014", "pdf": "http://doi.org/10.1007/978-3-319-94544-6"}, {"id": "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "title": "Deep Learning for Biometrics : A Survey KALAIVANI SUNDARARAJAN", "year": "2018", "pdf": null}, {"id": "fc516a492cf09aaf1d319c8ff112c77cfb55a0e5", "title": "XBadges. Identifying and training soft skills with commercial video games. 
Improving persistence, risk taking & spatial reasoning with commercial video games and facial and emotional recognition system", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/fc51/6a492cf09aaf1d319c8ff112c77cfb55a0e5.pdf"}, {"id": "fc1e37fb16006b62848def92a51434fc74a2431a", "title": "A Comprehensive Analysis of Deep Regression", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/fc1e/37fb16006b62848def92a51434fc74a2431a.pdf"}, {"id": "18858cc936947fc96b5c06bbe3c6c2faa5614540", "title": "Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf"}, {"id": "a2344004f0e1409c0c9473d071a5cfd74bff0a5d", "title": "Learnable PINs: Cross-modal Embeddings for Person Identity", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00833.pdf"}, {"id": "ae88996aad98bfa49a49d653fd9476e5982e982c", "title": "Efficient Group-n Encoding and Decoding for Facial Age Estimation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8141981"}, {"id": "28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08", "title": "Deep Label Distribution Learning for Apparent Age Estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402"}, {"id": "e7b6887cd06d0c1aa4902335f7893d7640aef823", "title": "Modelling of Facial Aging and Kinship: A Survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf"}, {"id": "9e0285debd4b0ba7769b389181bd3e0fd7a02af6", "title": "From Face Images and Attributes to Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/9e02/85debd4b0ba7769b389181bd3e0fd7a02af6.pdf"}, {"id": "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "title": "Age group classification in the wild with deep RoR architecture", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296549"}, {"id": "d1a8a46e1eb3769ed12f44075d63b49ccfe8c137", "title": "Feature specific analysis of a deep convolutional neural network for ageing classification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8471767"}, {"id": "1d776bfe627f1a051099997114ba04678c45f0f5", "title": "Deployment of Customized Deep Learning based Video Analytics On Surveillance Cameras", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10604.pdf"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "fcc6fd9b243474cd96d5a7f4a974f0ef85e7ddf7", "title": "InclusiveFaceNet: Improving Face Attribute Detection with Race and Gender Diversity", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.00193.pdf"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "a022eff5470c3446aca683eae9c18319fd2406d5", "title": "Deep learning for semantic description of visual human traits. 
(Apprentissage profond pour la description s\u00e9mantique des traits visuels humains)", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf"}, {"id": "26c7eda262dfda1c3a3597a3bf1f2f1cc4013425", "title": "Some Like It Hot — Visual Guidance for Preference Prediction", "year": 2016, "pdf": null}, {"id": "91a1945b9c40af4944a6cdcfe59a0999de4f650a", "title": "Age Estimation by Refining Label Distribution in Deep CNN", "year": 2017, "pdf": "https://doi.org/10.1007/978-3-319-69923-3_10"}, {"id": "a4cd3fc63ddc8468d3f684f32cb0578e41fed226", "title": "Generative Adversarial Style Transfer Networks for Face Aging", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ea7d/ff897a6618a5ae9c7fed19899ac0d3a4a04e.pdf"}, {"id": "632fa986bed53862d83918c2b71ab953fd70d6cc", "title": "What Face and Body Shapes Can Tell About Height", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10355.pdf"}, {"id": "d11d0151618987ce00a88ceda55d35f0bb89122e", "title": "An analysis of automatic gender detection by first-order configural relations", "year": 2017, "pdf": null}, {"id": "dca2bb023b076de1ccd0c6b8d71faeb3fccb3978", "title": "Joint Estimation of Age and Expression by Combining Scattering and Convolutional Networks", "year": 2018, "pdf": "http://doi.acm.org/10.1145/3152118"}, {"id": "bb2944569a2b3d3b8340b36d4903c8cddf20047f", "title": "Improving Regression Performance with Distributional Losses", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.04613.pdf"}, {"id": "f77c9bf5beec7c975584e8087aae8d679664a1eb", "title": "Local Deep Neural Networks for Age and Gender Classification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f77c/9bf5beec7c975584e8087aae8d679664a1eb.pdf"}, {"id": "a9ad8f6c6bf110485921b17f9790241b1548487c", "title": "Automatic Skin Tone Extraction for Visagism Applications", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/a9ad/8f6c6bf110485921b17f9790241b1548487c.pdf"}, {"id": "ad01c5761c89fdf523565cc0dec77b9a6ec8e694", "title": "Global and Local Consistent Wavelet-domain Age Synthesis", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07764.pdf"}, {"id": "d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9", "title": "Gender estimation based on supervised HOG, Action Units and unsupervised CNN feature extraction", "year": 2017, "pdf": null}, {"id": "2b60fe300735ea7c63f91c1121e89ba66040b833", "title": "A study on face recognition techniques with age and gender classification", "year": 2017, "pdf": null}, {"id": "d5444f9475253bbcfef85c351ea9dab56793b9ea", "title": "BoxCars: Improving Fine-Grained Recognition of Vehicles using 3-D Bounding Boxes in Traffic Surveillance", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.00686.pdf"}, {"id": "79c959833ff49f860e20b6654dbf4d6acdee0230", "title": "Hide-and-Seek: A Data Augmentation Technique for Weakly-Supervised Localization and Beyond", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.02545.pdf"}, {"id": "af6e351d58dba0962d6eb1baf4c9a776eb73533f", "title": "How to Train Your Deep Neural Network with Dictionary Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/af6e/351d58dba0962d6eb1baf4c9a776eb73533f.pdf"}, {"id": "22bebedc1a5f3556cb4f577bdbe032299a2865e8", "title": "Effective training of convolutional neural networks for face-based gender and age prediction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf"}, {"id": "af6cae71f24ea8f457e581bfe1240d5fa63faaf7", "title": "Multi-Task Zipping via Layer-wise Neuron Sharing", 
"year": "2018", "pdf": "https://arxiv.org/pdf/1805.09791.pdf"}, {"id": "633c851ebf625ad7abdda2324e9de093cf623141", "title": "Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727"}, {"id": "5366573e96a1dadfcd4fd592f83017e378a0e185", "title": "Server, server in the cloud. Who is the fairest in the crowd?", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5366/573e96a1dadfcd4fd592f83017e378a0e185.pdf"}, {"id": "81e628a23e434762b1208045919af48dceb6c4d2", "title": "Attend and Rectify: A Gated Attention Mechanism for Fine-Grained Recovery", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07320.pdf"}, {"id": "8c2233d763deb01761abe72b9b3dbb0b115916d3", "title": "Deep Modeling of Human Age Guesses for Apparent Age Estimation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489570"}, {"id": "357963a46dfc150670061dbc23da6ba7d6da786e", "title": "Online Regression with Model Selection", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/3579/63a46dfc150670061dbc23da6ba7d6da786e.pdf"}, {"id": "4560491820e0ee49736aea9b81d57c3939a69e12", "title": "Investigating the Impact of Data Volume and Domain Similarity on Transfer Learning Applications", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.04008.pdf"}, {"id": "c62c07de196e95eaaf614fb150a4fa4ce49588b4", "title": "SSR-Net: A Compact Soft Stagewise Regression Network for Age Estimation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c62c/07de196e95eaaf614fb150a4fa4ce49588b4.pdf"}, {"id": "97540905e4a9fdf425989a794f024776f28a3fa9", "title": "NDDR-CNN: Layer-wise Feature Fusing in Multi-Task CNN by Neural Discriminative Dimensionality Reduction", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/cc5a/1bf68ba00c20415e43684c6f75ce3fbc176c.pdf"}, {"id": "cca476114c48871d05537abb303061de5ab010d6", "title": "A compact deep convolutional neural network architecture for video based age and gender estimation", "year": 2016, "pdf": "https://doi.org/10.15439/2016F472"}, {"id": "6601a96220005883572fad5aa6b4632e413c8e5e", "title": "Recurrent learning of context for salient region detection", "year": "2018", "pdf": "http://doi.org/10.1007/s00779-018-1171-0"}, {"id": "377c6563f97e76a4dc836a0bd23d7673492b1aae", "title": "Motion deblurring of faces", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/377c/6563f97e76a4dc836a0bd23d7673492b1aae.pdf"}, {"id": "1277b1b8b609a18b94e4907d76a117c9783a5373", "title": "VirtualIdentity: Privacy preserving user profiling", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ASONAM.2016.7752438"}, {"id": "6e911227e893d0eecb363015754824bf4366bdb7", "title": "Wasserstein Divergence for GANs", "year": "2018", "pdf": "https://arxiv.org/pdf/1712.01026.pdf"}, {"id": "4562ea84ebfc8d9864e943ed9e44d35997bbdf43", "title": "Small Sample Deep Learning for Newborn Gestational Age Estimation", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.19"}, {"id": "02567fd428a675ca91a0c6786f47f3e35881bcbd", "title": "Deep Label Distribution Learning With Label Ambiguity", "year": 2017, "pdf": "https://arxiv.org/pdf/1611.01731.pdf"}, {"id": "ff60d4601adabe04214c67e12253ea3359f4e082", "title": "Video-based emotion recognition in the wild using deep transfer learning and score fusion", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ff60/d4601adabe04214c67e12253ea3359f4e082.pdf"}, {"id": 
"efb56e7488148d52d3b8a2dae9f8880b273f4226", "title": "Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07718.pdf"}, {"id": "2c2f03edc9b76e5ac132b54b2e3313237e22b5e7", "title": "Towards neural art-based face de-identification in video data", "year": 2016, "pdf": null}, {"id": "d0296efc3c532269aaa7e8f856f5d1807af847fb", "title": "Improving the face recognition system by hybrid image preprocessing", "year": 2016, "pdf": null}, {"id": "5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd", "title": "Supervised COSMOS Autoencoder: Learning Beyond the Euclidean Loss!", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06221.pdf"}, {"id": "17de5a9ce09f4834629cd76b8526071a956c9c6d", "title": "Smart Parental Advisory: A Usage Control and Deep Learning-Based Framework for Dynamic Parental Control on Smart TV", "year": 2017, "pdf": "https://doi.org/10.1007/978-3-319-68063-7_8"}, {"id": "fdd80b2139ff1b9becb17badd053b9a4a6a243f2", "title": "A method of facial wearable items recognition", "year": 2017, "pdf": null}, {"id": "f60070d3a4d333aa1436e4c372b1feb5b316a7ba", "title": "Face Recognition via Centralized Coordinate Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.05678.pdf"}, {"id": "f839ae810338e3b12c8e2f8db6ce4d725738d2d9", "title": "Learning CNNs for Face Recognition from Weakly Annotated Images", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.115"}, {"id": "880b4be9afc4d5ef75b5d77f51eadb557acbf251", "title": "Privacy-Preserving Age Estimation for Content Rating", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8547144"}, {"id": "0435a34e93b8dda459de49b499dd71dbb478dc18", "title": "VEGAC: Visual Saliency-based Age, Gender, and Facial Expression Classification Using Convolutional Neural Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0435/a34e93b8dda459de49b499dd71dbb478dc18.pdf"}, {"id": "ca50e441e275a3c04299bb6b59f6c098abecec1d", "title": "Face Recognition and Age Estimation Implications of Changes in Facial Features: A Critical Review Study", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361072"}, {"id": "15f82c3a7f12b82281aca77d519403086611ae69", "title": "Comparative Study of Human Age Estimation Based on Hand-Crafted and Deep Face Features", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/15f8/2c3a7f12b82281aca77d519403086611ae69.pdf"}, {"id": "366e650a578a3732ebe10267f04bcf9d3129f076", "title": "Design of Smart Shopping Wall Using Hand Gesture and Facial Image Recognition", "year": 2017, "pdf": null}, {"id": "daa120032d8f141bc6aae20e23b1b754a0dd7d5f", "title": "Kernel ELM and CNN Based Facial Age Estimation", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "ad53d57f69fdc3fcd6754477895ff4f51f503bf8", "title": "On the application of quantization for mobile optimized convolutional neural networks as a predictor of realtime ageing biomarkers", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8471792"}, {"id": "10195a163ab6348eef37213a46f60a3d87f289c5", "title": "Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks", "year": 2016, 
"pdf": "https://doi.org/10.1007/s11263-016-0940-3"}, {"id": "d818568838433a6d6831adde49a58cef05e0c89f", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "1b9472907f5b7a1815c98b4562dce6c46dd2cf34", "title": "Consistent Rank Logits for Ordinal Regression with Convolutional Neural Networks", "year": "2019", "pdf": "https://arxiv.org/pdf/1901.07884.pdf"}, {"id": "0deea943ac4dc1be822c02f97d0c6c97e201ba8d", "title": "Age category estimation using matching convolutional neural network", "year": 2018, "pdf": null}, {"id": "1171ec9250743c349e5218d4a01c4fdad94c7707", "title": "Low-Cost Transfer Learning of Face Tasks", "year": "2019", "pdf": "https://arxiv.org/pdf/1901.02675.pdf"}, {"id": "ac12ba5bf81de83991210b4cd95b4ad048317681", "title": "Combining Deep Facial and Ambient Features for First Impression Estimation", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ac12/ba5bf81de83991210b4cd95b4ad048317681.pdf"}, {"id": "57db5825a8eb2927735fb7c18c3ee4fb18d27d47", "title": "Max-Mahalanobis Linear Discriminant Analysis Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09308.pdf"}, {"id": "02e17f547dd75eee7282af1b5ad2626829615ac9", "title": "Beyond Counting: Comparisons of Density Maps for Crowd Analysis Tasks - Counting, Detection, and Tracking", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.10118.pdf"}, {"id": "6be2f5d7417c7a441f4f3357f2c1877aa4604b67", "title": "Facial Coding Scheme Reference 1 Craniofacial Distances", "year": "2019", "pdf": null}, {"id": "6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81", "title": "Structured Output SVM Prediction of Apparent Age, Gender and Smile from Deep Features", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.96"}, {"id": "4c81c76f799c48c33bb63b9369d013f51eaf5ada", "title": "Multi-modal Score Fusion and Decision Trees for Explainable Automatic Job Candidate Screening from Video CVs", "year": 2017, "pdf": "https://www.cmpe.boun.edu.tr/~salah/kaya17chalearn.pdf"}, {"id": "341ed69a6e5d7a89ff897c72c1456f50cfb23c96", "title": "DAGER: Deep Age, Gender and Emotion Recognition Using Convolutional Neural Network", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/cd7f/26c430363f90e530824446b3a4c85cfb94e5.pdf"}, {"id": "99f43e5f8f4348c04e97590ec173d61d2be1882d", "title": "Small Sample Deep Learning for Newborn Gestational Age Estimation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961726"}, {"id": "6db593d99fabc7f787f41786cb2e37c084f1ad19", "title": "Deep Age Estimation Model Stabilization from Images to Videos", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545283"}, {"id": "9755554b13103df634f9b1ef50a147dd02eab02f", "title": "How Transferable Are CNN-Based Features for Age and Gender Classification?", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736925"}, {"id": "71c4b8e1bb25ee80f4317411ea8180dae6499524", "title": "Extended Features using Machine Learning Techniques for Photo Liking Prediction", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463396"}, {"id": "7cee802e083c5e1731ee50e731f23c9b12da7d36", "title": "2^B3^C: 2 Box 3 Crop of Facial Image for Gender Classification with Convolutional Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7cee/802e083c5e1731ee50e731f23c9b12da7d36.pdf"}, {"id": "10d6fe566125597195cf985084347139a8cdad1e", "title": "Private and 
Scalable Personal Data Analytics Using Hybrid Edge-to-Cloud Deep Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364651"}, {"id": "f1ea8bdb3bd39d8269628bc7b99b2d918ea23ef7", "title": "Human features extraction by using anatomical and low level image descriptors from whole body images", "year": 2016, "pdf": null}, {"id": "8d5998cd984e7cce307da7d46f155f9db99c6590", "title": "ChaLearn looking at people: A review of events and resources", "year": "2017", "pdf": "https://arxiv.org/pdf/1701.02664.pdf"}, {"id": "fca6df7d36f449d48a8d1e48a78c860d52e3baf8", "title": "Fine-Grained Age Estimation in the wild with Attention LSTM Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10445.pdf"}, {"id": "2a7b7de7488211471a001044a3a249a117af488a", "title": "Physical Attribute Prediction Using Deep Residual Neural Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1812.07857.pdf"}, {"id": "d2be07499aea3ef593fae5b2f6cd89fe627f4d98", "title": "Face Aging with Improved Invertible Conditional GANs", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8546268"}, {"id": "d6c8f5674030cf3f5a2f7cc929bad37a422b26a0", "title": "Face Aging Simulation with Deep Convolutional Generative Adversarial Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371"}, {"id": "a0cce54ef20ad75dee4769c2ecaaac690d0eb21d", "title": "Does A Body Image Tell Age?", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545590"}, {"id": "a7664247a37a89c74d0e1a1606a99119cffc41d4", "title": "Modal Consistency based Pre-Trained Multi-Model Reuse", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a766/4247a37a89c74d0e1a1606a99119cffc41d4.pdf"}, {"id": "0e3840ea3227851aaf4633133dd3cbf9bbe89e5b", "title": "ChaLearn Looking at People: Events and Resources", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8d59/98cd984e7cce307da7d46f155f9db99c6590.pdf"}, {"id": "93e962f8886eae13b02ad2aa98bdedfbd7e68709", "title": "Dual Conditional GANs for Face Aging and Rejuvenation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93e9/62f8886eae13b02ad2aa98bdedfbd7e68709.pdf"}, {"id": "717ffde99c0d6b58675d44b4c66acedce0ca86e8", "title": "Age estimation based on face images and pre-trained convolutional neural networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8285381"}, {"id": "e5563a0d6a2312c614834dc784b5cc7594362bff", "title": "Real-Time Demographic Profiling from Face Imagery with Fisher Vectors", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e556/3a0d6a2312c614834dc784b5cc7594362bff.pdf"}, {"id": "6f3054f182c34ace890a32fdf1656b583fbc7445", "title": "Age Estimation Robust to Optical and Motion Blurring by Deep Residual CNN", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/6f30/54f182c34ace890a32fdf1656b583fbc7445.pdf"}, {"id": "4905323aaf61952e07f62c18fa662c7da895e40e", "title": "Recovering Joint and Individual Components in Facial Data", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8219732"}, {"id": "a398fcb30b49bee32d8f2c4cd3939517cd262025", "title": "Generative Adversarial Style Transfer Networks for Face Aging", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575452"}, {"id": "7b92d1e53cc87f7a4256695de590098a2f30261e", "title": "From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation", "year": "2018", 
"pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575487"}, {"id": "ac2e3a889fc46ca72f9a2cdedbdd6f3d4e9e2627", "title": "Age detection from a single image using multitask neural networks : An overview and design proposal", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/ac2e/3a889fc46ca72f9a2cdedbdd6f3d4e9e2627.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/lfw.json b/site/datasets/unknown/lfw.json new file mode 100644 index 00000000..4ad1720e --- /dev/null +++ b/site/datasets/unknown/lfw.json @@ -0,0 +1 @@ +{"id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "paper": {"paper_id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "key": "lfw", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf", "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "name": "LFW"}, "citations": [{"id": "b216040f110d2549f61e3f5a7261cab128cab361", "title": "Weighted Voting of Discriminative Regions for Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b216/040f110d2549f61e3f5a7261cab128cab361.pdf"}, {"id": "6afe1f668eea8dfdd43f0780634073ed4545af23", "title": "Deep learning for content-based video retrieval in film and television production", "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-4962-9"}, {"id": "0e7a792ef33af26c26970ffc275d0ae82ee8f5d1", "title": "A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099876"}, {"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "303828619630ca295f772be0a7b9fe8007dfaea3", "title": "Face verification in the wild using similarity in representations", "year": 2017, "pdf": null}, {"id": "7ef0cc4f3f7566f96f168123bac1e07053a939b2", "title": "Triangular Similarity Metric Learning: a Siamese Architecture Approach. 
( L'apprentissage de similarit\u00e9 triangulaire en utilisant des r\u00e9seaux siamois)", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/e735/b8212d8a81909753291d5d06789a917014f8.pdf"}, {"id": "2d88e7922d9f046ace0234f9f96f570ee848a5b5", "title": "Detection under Privileged Information", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2d88/e7922d9f046ace0234f9f96f570ee848a5b5.pdf"}, {"id": "0db1cd7a4aed9d17256b02946660a2ea902d6635", "title": "Keystroke Biometrics Ongoing Competition", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7738412"}, {"id": "ac5d0705a9ddba29151fd539c668ba2c0d16deb6", "title": "RED-Net: A Recurrent Encoder\u2013Decoder Network for Video-Based Face Alignment", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.06066.pdf"}, {"id": "6abdeda10a4261ae2d91806d367f7eca19e44792", "title": "Face recognition in real-world images", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952403"}, {"id": "7aa4c16a8e1481629f16167dea313fe9256abb42", "title": "Multi-task learning for face identification and attribute estimation", "year": 2017, "pdf": "https://doi.org/10.1109/ICASSP.2017.7952703"}, {"id": "80688e72b00013eabe57ce88be0c204d0b5aea2c", "title": "Semantic Face Signatures: Recognizing and Retrieving Faces by Verbal Descriptions", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078274"}, {"id": "0c65226edb466204189b5aec8f1033542e2c17aa", "title": "A study of CNN outside of training conditions", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296997"}, {"id": "65ee4de888e5b934429dcb126ee0ae544156c9bd", "title": "Face recognition using linear representation ensembles", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/65ee/4de888e5b934429dcb126ee0ae544156c9bd.pdf"}, {"id": "3393459600368be2c4c9878a3f65a57dcc0c2cfa", "title": "Eigen-PEP for Video Face Recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3393/459600368be2c4c9878a3f65a57dcc0c2cfa.pdf"}, {"id": "b26d23501f8e9f89ce822057a6c645bc9a634b2c", "title": "Multi Seed Authentication Using S/Key Scheme", "year": 2015, "pdf": "https://doi.org/10.1109/HPCC-CSS-ICESS.2015.104"}, {"id": "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "title": "Labeled Faces in the Wild: A Survey", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf"}, {"id": "a7127869e36c05e8bd0b7a99e645cf22d6651155", "title": "Similarity calculation for face verification with convolutional neural network", "year": 2017, "pdf": null}, {"id": "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "title": "Pairwise Relational Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04976.pdf"}, {"id": "d5cdcaa19a62e3cb97da013555b99fe35e724e38", "title": "Improved ArtGAN for Conditional Synthesis of Natural Image and Artwork", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.09533.pdf"}, {"id": "7d85c3a1bae00efde989180f90fcc3b42c4013e1", "title": "<italic>Spartans</italic>: Single-Sample Periocular-Based Alignment-Robust Recognition Technique Applied to Non-Frontal Scenarios", "year": 2015, "pdf": "https://doi.org/10.1109/TIP.2015.2468173"}, {"id": "27e97b67a8401def58eb41b4b00d3dfb0e4ad1a8", "title": "Knowledge Based Face Detection Using Fusion Features", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/27e9/7b67a8401def58eb41b4b00d3dfb0e4ad1a8.pdf"}, {"id": "ba1c3dd512fd1fbd7325d63aad435ff07196755e", "title": "Seed-based authentication", "year": 2015, "pdf": 
"https://doi.org/10.1109/CTS.2015.7210447"}, {"id": "392c8e575f8520bb880959d494be0911d091b525", "title": "Cross-Modal Metric Learning for AUC Optimization", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8246530"}, {"id": "4ad8178e285901c886e3e2e66f2340115347b23f", "title": "A Comprehensive Study on Center Loss for Deep Face Recognition", "year": "2018", "pdf": "http://doi.org/10.1007/s11263-018-01142-4"}, {"id": "92d051d4680eb41eb172d23cb8c93eed7677af56", "title": "Adversarial Spatial Frequency Domain Critic Learning for Age and Gender Classification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451616"}, {"id": "2bbe89f61a8d6d4d6e39fdcaf8c185f110a01c78", "title": "Joint Feature Learning for Face Recognition", "year": 2015, "pdf": "http://www3.ntu.edu.sg/home/wanggang/TIFS15.pdf"}, {"id": "3df44e3a547c7ccbc1222bdeaeef6c899c59dc30", "title": "Face Feature Extraction: A Complete Review", "year": 2018, "pdf": "https://doi.org/10.1109/ACCESS.2017.2784842"}, {"id": "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "title": "Minimum Margin Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06741.pdf"}, {"id": "74ed90588ff5b2aa65c1b0882f1aa50caf5c2127", "title": "A multimodal deep learning framework using local feature representations for face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s00138-017-0870-2"}, {"id": "f77c9bf5beec7c975584e8087aae8d679664a1eb", "title": "Local Deep Neural Networks for Age and Gender Classification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f77c/9bf5beec7c975584e8087aae8d679664a1eb.pdf"}, {"id": "204b2b9c33d2c3be77dbd6d82dfefd1dfa95d8a3", "title": "Deep Face Recognition under Eyeglass and Scale Variation Using Extended Siamese Network", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575869"}, {"id": "48499deeaa1e31ac22c901d115b8b9867f89f952", "title": "Interim Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf"}, {"id": "3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f", "title": "Enhancing convolutional neural networks for face recognition with occlusion maps and batch triplet loss", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.07923.pdf"}, {"id": "b624d6eaa1d21e2a34e1b9bc34cfe2159ed39449", "title": "Cross Audio-Visual Speaker Identification in the Wild Using Deep Learning", "year": "2017", "pdf": null}, {"id": "7d40e7e5c01bd551edf65902386401e1b8b8014b", "title": "Channel-Level Acceleration of Deep Face Representations", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7303876"}, {"id": "214ac8196d8061981bef271b37a279526aab5024", "title": "Face Recognition Using Smoothed High-Dimensional Representation", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/214a/c8196d8061981bef271b37a279526aab5024.pdf"}, {"id": "ca5e9973a4494c608548f639eb9a391f6235d4f0", "title": "Robust RGB-D Face Recognition Using Attribute-Aware Loss", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.09847.pdf"}, {"id": "6502cf30c088c6c7c4b2a05b7777b032c9dde7cd", "title": "Learning Compact Binary Face Descriptor for Face Recognition", "year": 2015, "pdf": "http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Learning%20compact%20binary%20face%20descriptor%20for%20face%20recognition_PAMI2015.pdf"}, {"id": "0f9e0eb925b3dbc6a56731710b06716646babc8e", "title": "Pairwise 
Identity Verification via Linear Concentrative Metric Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7786904"}, {"id": "726f76f11e904d7fcb12736c276a0b00eb5cde49", "title": "A Performance Comparison of Loss Functions for Deep Face Recognition", "year": "2019", "pdf": "https://arxiv.org/pdf/1901.05903.pdf"}, {"id": "7e8c8b1d72c67e2e241184448715a8d4bd88a727", "title": "Face Verification Based on Relational Disparity Features and Partial Least Squares Models", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8097314"}, {"id": "53de11d144cd2eda7cf1bb644ae27f8ef2489289", "title": "Extending Detection with Privileged Information via Generalized Distillation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424637"}, {"id": "a146c83b0fddbc5edb5850e1833521c356f1fc0c", "title": "Local binary pattern network: A deep learning approach for face recognition", "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7532955"}, {"id": "4be3d083e05fed1adc1af3dc4a119b74590b0eaa", "title": "Image-Based Seed Generation", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CIC.2016.064"}, {"id": "ecd08edab496801fd4fde45362dde462d00ee91c", "title": "Compressive Binary Patterns: Designing a Robust Binary Face Descriptor with Random-Field Eigenfilters.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994561"}, {"id": "52e181018aa26df71a02ffffaa1d061fc43cf09d", "title": "Coupled 3D Convolutional Neural Networks for Audio-Visual Recognition", "year": "2017", "pdf": null}, {"id": "0dccc881cb9b474186a01fd60eb3a3e061fa6546", "title": "Effective face frontalization in unconstrained images", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_104_ext.pdf"}, {"id": "df51dfe55912d30fc2f792561e9e0c2b43179089", "title": "Face Hallucination Using Linear Models of Coupled Sparse Support", "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2717181"}, {"id": "10ab1b48b2a55ec9e2920a5397febd84906a7769", "title": "I-Pic: A Platform for Privacy-Compliant Image Capture", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/10ab/1b48b2a55ec9e2920a5397febd84906a7769.pdf"}, {"id": "1fcdc113a5df2f45a1f4b3249c041d942a3a730b", "title": "Reconstruction-Based Metric Learning for Unconstrained Face Verification", "year": 2015, "pdf": "https://doi.org/10.1109/TIFS.2014.2363792"}, {"id": "ff3a89da1140c7d75586527002f9fd28cbfb04b6", "title": "Strengths and weaknesses of deep learning models for face recognition against image degradations", "year": "2018", "pdf": "http://doi.org/10.1049/iet-bmt.2017.0083"}, {"id": "6cacda04a541d251e8221d70ac61fda88fb61a70", "title": "One-shot Face Recognition by Promoting Underrepresented Classes", "year": "2017", "pdf": "https://arxiv.org/pdf/1707.05574.pdf"}, {"id": "46b960d3d871b2ee19d1b8e8838e7036c2ee56ed", "title": "Optimal image compression via block-based adaptive colour reduction with minimal contour effect", "year": "2018", "pdf": "http://doi.org/10.1007/s11042-018-6118-y"}, {"id": "5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f", "title": "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7589/58f2340ba46c6708b73d5427985d5623a512.pdf"}, {"id": "930b3472592ced6665cd630be7ae57d4abb8b4b1", "title": "Development of two novel face-recognition CAPTCHAs: A security and usability study", "year": "2016", "pdf": "http://doi.org/10.1016/j.cose.2016.03.007"}, {"id": 
"2a09be75ebe85f64a198d20766872c66cb2f00d6", "title": "Person Recognition at a Distance: Improving Face Recognition Through Body Static Information", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545112"}, {"id": "2348f1fa2940b01ec90e023fac8cc96812189774", "title": "Face verification based on convolutional neural network and deep learning", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/EWDTS.2017.8110157"}, {"id": "5b01d4338734aefb16ee82c4c59763d3abc008e6", "title": "A Robust Face Recognition Algorithm Based on Kernel Regularized Relevance-Weighted Discriminant Analysis", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5b01/d4338734aefb16ee82c4c59763d3abc008e6.pdf"}, {"id": "d6e785d3466eeaaae1f6c792f679d1111ab30302", "title": "People tracking and re-identification by face recognition for RGB-D camera networks", "year": 2017, "pdf": "https://doi.org/10.1109/ECMR.2017.8098689"}, {"id": "1da8178bfca7c76cae53ec34364d86c7d5713fdd", "title": "Pairwise Relational Networks using Local Appearance Features for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.06405.pdf"}, {"id": "d9208c964bed4cc0055e313353c73fd00a60c412", "title": "Multi-class Fukunaga Koontz discriminant analysis for enhanced face recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d920/8c964bed4cc0055e313353c73fd00a60c412.pdf"}, {"id": "ef230e3df720abf2983ba6b347c9d46283e4b690", "title": "QUIS-CAMPI: an annotated multi-biometrics data feed from surveillance scenarios", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef23/0e3df720abf2983ba6b347c9d46283e4b690.pdf"}, {"id": "5b721f86f4a394f05350641e639a9d6cb2046c45", "title": "Detection under Privileged Information", "year": "2018", "pdf": "https://arxiv.org/pdf/1603.09638.pdf"}, {"id": "9c3a25f7f8ac74cc3e5335bc06ce85f16edc13ae", "title": "Robust collaborative representation-based classification via regularization of truncated total least squares", "year": 2018, "pdf": null}, {"id": "f15b7c317f106816bf444ac4ffb6c280cd6392c7", "title": "Deep Disguised Faces Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575272"}, {"id": "07d3dddb363870cfd980ca8d748c8b1418aad863", "title": "Learning from Differentially Private Neural Activations with Edge Computing", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8567659"}, {"id": "77d31d2ec25df44781d999d6ff980183093fb3de", "title": "The Multiverse Loss for Robust Transfer Learning", "year": 2016, "pdf": "http://openaccess.thecvf.com/content_cvpr_2016/supplemental/Littwin_The_Multiverse_Loss_2016_CVPR_supplemental.pdf"}, {"id": "689d6bc6e99bbfedfabc0654724139348dbae37c", "title": "Object-Level Video Advertising: An Optimization Framework", "year": 2017, "pdf": "https://doi.org/10.1109/TII.2016.2605629"}, {"id": "59fc69b3bc4759eef1347161e1248e886702f8f7", "title": "Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf"}, {"id": "06b0a66fd062a9bc7f0d94e270e4ddf47b275c10", "title": "Compact deep learned feature-based face recognition for Visual Internet of Things", "year": "2017", "pdf": "http://doi.org/10.1007/s11227-017-2198-0"}, {"id": "5ec7925cb1af29cc4916ed552a59db42c82f52d0", "title": "Cross Audio-Visual Recognition in the Wild Using Deep Learning", "year": "2017", "pdf": null}, {"id": 
"5dc056fe911a3e34a932513abe637076250d96da", "title": "Real-time facial feature detection using conditional regression forests", "year": 2012, "pdf": "http://www.vision.ee.ethz.ch/~gfanelli/pubs/cvpr12.pdf"}, {"id": "4209783b0cab1f22341f0600eed4512155b1dee6", "title": "Accurate and Efficient Similarity Search for Large Scale Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00365.pdf"}, {"id": "3e687d5ace90c407186602de1a7727167461194a", "title": "Photo Tagging by Collection-Aware People Recognition", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/3e68/7d5ace90c407186602de1a7727167461194a.pdf"}, {"id": "8ea8cdee6f62751d87339f821d2b2a094ab4b260", "title": "Enabling Live Video Analytics with a Scalable and Privacy-Aware Framework", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3209659"}, {"id": "a38dd439209b0913b14b1c3c71143457d8cf9b78", "title": "Face recognition in unconstrained environments", "year": 2015, "pdf": "https://doi.org/10.1109/IJCNN.2015.7280803"}, {"id": "c94c2cf52fef0503c09268c7d1faee60465ee08e", "title": "BenchIP: Benchmarking Intelligence Processors", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08315.pdf"}, {"id": "d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b", "title": "Gender recognition: Methods, datasets and results", "year": 2015, "pdf": "http://iplab.dmi.unict.it/sites/default/files/_11.pdf"}, {"id": "20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba", "title": "Sparse-MVRVMs Tree for Fast and Accurate Head Pose Estimation in the Wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e805/bc872e18277c7cbfce82206cf1667cce22cc.pdf"}, {"id": "77a9b1856ebbc9a6170ee4c572a515d6db062cef", "title": "Towards a practical face recognition system: Robust registration and illumination by sparse representation", "year": 2009, "pdf": "http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1291.pdf"}, {"id": "ea2ee5c53747878f30f6d9c576fd09d388ab0e2b", "title": "Viola-Jones Based Detectors: How Much Affects the Training Set?", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/ea2e/e5c53747878f30f6d9c576fd09d388ab0e2b.pdf"}, {"id": "539ffd51f18404e1ef83371488cf5a27cd16d064", "title": "Real-world gender classification via local Gabor binary pattern and three-dimensional face reconstruction by generic elastic model", "year": 2015, "pdf": "https://doi.org/10.1049/iet-ipr.2014.0733"}, {"id": "8e2bd1192b60cdb75c99234ccbd50ca920a47d00", "title": "Joint Rate-Distortion Optimization for Simultaneous Texture and Deep Feature Compression of Facial Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8499170"}, {"id": "3e18b439a6fff09a0e4c245eb1298531cc766a72", "title": "Semi-automatic Face Image Finding Method , Which Uses the 3 D Model of the Head for Recognising an Unknown Face", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/3e18/b439a6fff09a0e4c245eb1298531cc766a72.pdf"}, {"id": "e13360cda1ebd6fa5c3f3386c0862f292e4dbee4", "title": "Range Loss for Deep Face Recognition with Long-tail", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e133/60cda1ebd6fa5c3f3386c0862f292e4dbee4.pdf"}, {"id": "b941d4a85be783a6883b7d41c1afa7a9db451831", "title": "Radiofrequency ablation planning for cardiac arrhythmia treatment using modeling and machine learning approaches", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/0a60/566568debd3f3f48e663d3af20b8f13a94f2.pdf"}, {"id": "310dcf9edb491b63d09a9eb55a99ad6bb46da1d4", "title": "A learning-based human facial image quality evaluation method in video-based face 
recognition systems", "year": 2017, "pdf": null}, {"id": "c398684270543e97e3194674d9cce20acaef3db3", "title": "Comparative Face Soft Biometrics for Human Identification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c398/684270543e97e3194674d9cce20acaef3db3.pdf"}, {"id": "2c8743089d9c7df04883405a31b5fbe494f175b4", "title": "Real-time full-body human gender recognition in (RGB)-D data", "year": 2015, "pdf": "http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf"}, {"id": "d2d9612d3d67582d0cd7c1833599b88d84288fab", "title": "A comparison of deep multilayer networks and Markov random field matching models for face recognition in the wild", "year": 2016, "pdf": "https://doi.org/10.1049/iet-cvi.2015.0222"}, {"id": "566038a3c2867894a08125efe41ef0a40824a090", "title": "Face recognition and gender classification in personal memories", "year": 2009, "pdf": "http://mirlab.org/conference_papers/international_conference/icassp%202009/pdfs/0001945.pdf"}, {"id": "0435a34e93b8dda459de49b499dd71dbb478dc18", "title": "VEGAC: Visual Saliency-based Age, Gender, and Facial Expression Classification Using Convolutional Neural Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0435/a34e93b8dda459de49b499dd71dbb478dc18.pdf"}, {"id": "7813d405450013bbdb0b3a917319d5964a89484a", "title": "From Affine Rank Minimization Solution to Sparse Modeling", "year": 2017, "pdf": "https://doi.org/10.1109/WACV.2017.62"}, {"id": "edef98d2b021464576d8d28690d29f5431fd5828", "title": "Pixel-Level Alignment of Facial Images for High Accuracy Recognition Using Ensemble of Patches", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/edef/98d2b021464576d8d28690d29f5431fd5828.pdf"}, {"id": "7323b594d3a8508f809e276aa2d224c4e7ec5a80", "title": "An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05508.pdf"}, {"id": "90ae02da16b750a9fd43f8a38440f848309c2fe0", "title": "A review of facial gender recognition", "year": 2015, "pdf": "https://doi.org/10.1007/s10044-015-0499-6"}, {"id": "ccb54fc5f263a8bc2a8373839cb6855f528f10d3", "title": "A realistic virtual environment for evaluating face analysis systems under dynamic conditions", "year": "2016", "pdf": "http://doi.org/10.1016/j.patcog.2015.11.008"}, {"id": "9207671d9e2b668c065e06d9f58f597601039e5e", "title": "Face Detection Using a 3D Model on Face Keypoints", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/9207/671d9e2b668c065e06d9f58f597601039e5e.pdf"}, {"id": "cb522b2e16b11dde48203bef97131ddca3cdaebd", "title": "Fusion of Domain-Specific and Trainable Features for Gender Recognition From Face Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331979"}, {"id": "c5c56e9c884ac4070880ac481909bb6b621d2a3f", "title": "Random ensemble metrics for object recognition", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126466"}, {"id": "845f45f8412905137bf4e46a0d434f5856cd3aec", "title": "The Spyware Used in Intimate Partner Violence", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418618"}, {"id": "d50a40f2d24363809a9ac57cf7fbb630644af0e5", "title": "End-to-end Trained CNN Encode-Decoder Networks for Image Steganography", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d50a/40f2d24363809a9ac57cf7fbb630644af0e5.pdf"}, {"id": "13141284f1a7e1fe255f5c2b22c09e32f0a4d465", "title": "Object Tracking by Oversampling Local Features", "year": 2014, "pdf": 
"http://www.micc.unifi.it/pernici/index_files/ALIEN_final.pdf"}, {"id": "2d294c58b2afb529b26c49d3c92293431f5f98d0", "title": "Maximum Margin Projection Subspace Learning for Visual Data Analysis", "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2348868"}, {"id": "dae1726852228b9c3c2b45f440f38f904747e40f", "title": "A Method for Facial Emotion Recognition Based on Interest Points", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8509055"}, {"id": "0b8c92463f8f5087696681fb62dad003c308ebe2", "title": "On matching sketches with digital face images", "year": 2010, "pdf": "https://doi.org/10.1109/BTAS.2010.5634507"}, {"id": "39ed31ced75e6151dde41944a47b4bdf324f922b", "title": "Pose-Guided Photorealistic Face Rotation", "year": "", "pdf": "https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf"}, {"id": "fea83550a21f4b41057b031ac338170bacda8805", "title": "Learning a Metric Embedding for Face Recognition using the Multibatch Method", "year": "2016", "pdf": "https://arxiv.org/pdf/1605.07270.pdf"}, {"id": "518439ba2895c84ba686db5b83674c440e637c0b", "title": "The Price of Fair PCA: One Extra Dimension", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00103.pdf"}, {"id": "7808937b46acad36e43c30ae4e9f3fd57462853d", "title": "Describing people: A poselet-based approach to attribute classification", "year": 2011, "pdf": "http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf"}, {"id": "2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924", "title": "Compare and Contrast: Learning Prominent Differences in Relative Attributes", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/2a6b/ba2e81d5fb3c0fd0e6b757cf50ba7bf8e924.pdf"}, {"id": "55a158f4e7c38fe281d06ae45eb456e05516af50", "title": "Simile Classifiers for Face Classification", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/55a1/58f4e7c38fe281d06ae45eb456e05516af50.pdf"}, {"id": "b3cb91a08be4117d6efe57251061b62417867de9", "title": "Label propagation approach for predicting missing biographic labels in face-based biometric records", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b3cb/91a08be4117d6efe57251061b62417867de9.pdf"}, {"id": "88e2efab01e883e037a416c63a03075d66625c26", "title": "Convolutional Experts Constrained Local Model for 3D Facial Landmark Detection", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265507"}, {"id": "19dd371e1649ab55a46f4b98890d6937a411ec5d", "title": "Face recognition despite missing information", "year": 2011, "pdf": "http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2011_11_17_DagliC_HST_FP.pdf"}, {"id": "8035e8796ed5bdd44477c523cd6b03f9adfa2d8e", "title": "Multimodal Feature Level Fusion based on Particle Swarm Optimization with Deep Transfer Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8477817"}, {"id": "26ac607a101492bc86fd81a141311066cfe9e2b5", "title": "Sieving Regression Forest Votes for Facial Feature Detection in the Wild", "year": 2013, "pdf": "http://www.eecs.qmul.ac.uk/~hy300/papers/YangPatrasiccv2013.pdf"}, {"id": "fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139", "title": "Bayesian face recognition using 2D Gaussian-Hermite moments", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/fe0c/51fd41cb2d5afa1bc1900bbbadb38a0de139.pdf"}, {"id": "ac12a36330248eaddadd3e6e75b909e023c7674a", "title": "Towards generic image classification using tree-based learning: An extensive empirical study", "year": "2016", "pdf": 
"http://doi.org/10.1016/j.patrec.2016.01.006"}, {"id": "f95ba7673789d1b4118d30e360a5a37fd75d3961", "title": "Face Recognition using Modified Generalized Hough Transform and Gradient Distance Descriptor", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/f95b/a7673789d1b4118d30e360a5a37fd75d3961.pdf"}, {"id": "6d91da37627c05150cb40cac323ca12a91965759", "title": "Gender Politics in the 2016 U.S. Presidential Election: A Computer Vision Approach", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf"}, {"id": "adfaf01773c8af859faa5a9f40fb3aa9770a8aa7", "title": "Large Scale Visual Recognition", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/adfa/f01773c8af859faa5a9f40fb3aa9770a8aa7.pdf"}, {"id": "14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6", "title": "Multi-subregion based correlation filter bank for robust face recognition", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/4b76/694ff2efb302074adf1ba6052d643177abd1.pdf"}, {"id": "d31328b12eef33e7722b8e5505d0f9d9abe2ffd9", "title": "Deep Unsupervised Domain Adaptation for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866"}, {"id": "cc9d068cf6c4a30da82fd6350a348467cb5086d4", "title": "Protecting Your Faces: MeshFaces Generation and Removal via High-Order Relation-Preserving CycleGAN", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411204"}, {"id": "57178b36c21fd7f4529ac6748614bb3374714e91", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217"}, {"id": "c7c8d150ece08b12e3abdb6224000c07a6ce7d47", "title": "DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1611.05271.pdf"}, {"id": "9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682", "title": "To Frontalize or Not to Frontalize: Do We Really Need Elaborate Pre-processing to Improve Face Recognition?", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113"}, {"id": "f355e54ca94a2d8bbc598e06e414a876eb62ef99", "title": "A survey on heterogeneous face recognition: Sketch, infra-red, 3D and low-resolution", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f355/e54ca94a2d8bbc598e06e414a876eb62ef99.pdf"}, {"id": "5c19c4c6a663fe185a739a5f50cef6a12a4635a1", "title": "Lucas-Kanade based entropy congealing for joint face alignment", "year": 2012, "pdf": "https://doi.org/10.1016/j.imavis.2012.08.016"}, {"id": "91b1a59b9e0e7f4db0828bf36654b84ba53b0557", "title": "Simultaneous Hallucination and Recognition of Low-Resolution Faces Based on Singular Value Decomposition", "year": 2015, "pdf": "http://www.kresttechnology.com/krest-academic-projects/krest-mtech-projects/ECE/MTech%20DSP%202015-16/MTech%20DSP%20BasePaper%202015-16/50.pdf"}, {"id": "2f59f28a1ca3130d413e8e8b59fb30d50ac020e2", "title": "Children Gender Recognition Under Unconstrained Conditions Based on Contextual Information", "year": 2014, "pdf": "http://pralab.diee.unica.it/sites/default/files/Satta_ICPR2014.pdf"}, {"id": "b7b461f82c911f2596b310e2b18dd0da1d5d4491", "title": "K-mappings and Regression trees", "year": 2014, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p2961-wang.pdf"}, {"id": "5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e", "title": "Face Recognition Algorithms", "year": 2010, "pdf": 
"http://pdfs.semanticscholar.org/5d5c/d6fa5c41eb9d3d2bab3359b3e5eb60ae194e.pdf"}, {"id": "6b089627a4ea24bff193611e68390d1a4c3b3644", "title": "Cross-Pollination of Normalization Techniques From Speaker to Face Authentication Using Gaussian Mixture Models", "year": 2012, "pdf": "http://publications.idiap.ch/downloads/reports/2012/Wallace_Idiap-RR-03-2012.pdf"}, {"id": "0e36bf238d2db6c970ade0b5f68811ed6debc4e8", "title": "Recognizing Partial Biometric Patterns", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07399.pdf"}, {"id": "52a9f957f776c8b3d913cfcd20452b9e31c27845", "title": "OPML: A one-pass closed-form solution for online metric learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/52a9/f957f776c8b3d913cfcd20452b9e31c27845.pdf"}, {"id": "75e7fa7290b9b740559725b9c59df0d457523ee3", "title": "A pr 2 01 8 Ultra Power-Efficient CNN Domain Specific Accelerator with 9 . 3 TOPS / Watt for Mobile and Embedded Applications", "year": "2018", "pdf": null}, {"id": "710011644006c18291ad512456b7580095d628a2", "title": "Learning Residual Images for Face Attribute Manipulation", "year": "2017", "pdf": "https://arxiv.org/pdf/1612.05363.pdf"}, {"id": "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "title": "Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf"}, {"id": "34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c", "title": "The AXES submissions at TrecVid 2013", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/9e97/360b519d9912ded55618ccbb000d74d8e35c.pdf"}, {"id": "fdbe7c520568d9a32048270d2c87113c635dc7e6", "title": "Live Stream Oriented Age and Gender Estimation using Boosted LBP Histograms Comparisons", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/fdbe/7c520568d9a32048270d2c87113c635dc7e6.pdf"}, {"id": "b0c1615ebcad516b5a26d45be58068673e2ff217", "title": "How Image Degradations Affect Deep CNN-Based Face Recognition?", "year": "2016", "pdf": "https://arxiv.org/pdf/1608.05246.pdf"}, {"id": "b6259115b819424de53bb92f64cc459dcb649f31", "title": "Learning Feature Representation for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078466"}, {"id": "363e5a0e4cd857e98de72a726ad6f80cea9c50ab", "title": "Fast Landmark Localization With 3D Component Reconstruction and CNN for Cross-Pose Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.09580.pdf"}, {"id": "2e3d081c8f0e10f138314c4d2c11064a981c1327", "title": "A Comprehensive Performance Evaluation of Deformable Face Tracking \u201cIn-the-Wild\u201d", "year": 2017, "pdf": "http://arxiv.org/pdf/1603.06015v1.pdf"}, {"id": "fff0a848b57361e1e99548c95fbc2ec9ae00ce32", "title": "A Robust Approach for Gender Recognition Using Deep Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8494194"}, {"id": "1a45ddaf43bcd49d261abb4a27977a952b5fff12", "title": "LDOP: Local Directional Order Pattern for Robust Face Retrieval", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/1a45/ddaf43bcd49d261abb4a27977a952b5fff12.pdf"}, {"id": "f604c312ff4706f1849078b2ca28409f0fcd859d", "title": "Compact deep learned feature-based face recognition for Visual Internet of Things", "year": 2017, "pdf": null}, {"id": "9f8ebf149aed8a0eda5c3375c9947c6b26eb7873", "title": "FANS: face annotation by searching large-scale web facial images", "year": 2013, "pdf": "http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp21-wang.pdf"}, 
{"id": "ff398e7b6584d9a692e70c2170b4eecaddd78357", "title": "Title of dissertation : FACE RECOGNITION AND VERIFICATION IN UNCONSTRAINED ENVIRIONMENTS", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ff39/8e7b6584d9a692e70c2170b4eecaddd78357.pdf"}, {"id": "ec40df721a80c62d4a768fe29b58d86b1a07f435", "title": "Local robust sparse representation for face recognition with single sample per person", "year": 2018, "pdf": null}, {"id": "c03f48e211ac81c3867c0e787bea3192fcfe323e", "title": "Mahalanobis Metric Scoring Learned from Weighted Pairwise Constraints in I-Vector Speaker Recognition System", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c03f/48e211ac81c3867c0e787bea3192fcfe323e.pdf"}, {"id": "03d9ccce3e1b4d42d234dba1856a9e1b28977640", "title": "Facial Affect \"In-the-Wild\": A Survey and a New Database", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/03d9/ccce3e1b4d42d234dba1856a9e1b28977640.pdf"}, {"id": "8959e0e9a24c0fe79f3fd3acca9d139edc0abcfd", "title": "Appearance based gender classification with PCA and (2D)<sup>2</sup> PC A on approximation face image", "year": 2014, "pdf": null}, {"id": "2770b095613d4395045942dc60e6c560e882f887", "title": "GridFace: Face Rectification via Learning Local Homography Transformations", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.06210.pdf"}, {"id": "8913a5b7ed91c5f6dec95349fbc6919deee4fc75", "title": "BigBIRD: A large-scale 3D database of object instances", "year": 2014, "pdf": "https://people.eecs.berkeley.edu/~pabbeel/papers/2014-ICRA-BigBIRD.pdf"}, {"id": "9b1c218a55ead45296bfd7ad315aaeff1ae9983e", "title": "Hierarchical Spatio-Temporal Probabilistic Graphical Model with Multiple Feature Fusion for Binary Facial Attribute Classification in Real-World Face Videos", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2481396"}, {"id": "56a653fea5c2a7e45246613049fb16b1d204fc96", "title": "Quaternion Collaborative and Sparse Representation With Application to Color Face Recognition", "year": 2016, "pdf": "http://ieeeprojectsmadurai.com/matlab2016base/Quaternion%20Collaborative%20and%20Sparse%20Representation.pdf"}, {"id": "0fc4d0c328036cc197a48f278f7c15cb12860f3a", "title": "Learning a non-linear combination of Mahalanobis distances using statistical inference for similarity measure", "year": "2015", "pdf": "http://doi.org/10.1049/iet-cvi.2014.0011"}, {"id": "a3d8b5622c4b9af1f753aade57e4774730787a00", "title": "Pose-Aware Person Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.10120.pdf"}, {"id": "5fc97d6cb5af21ed196e44f22cee31ce8c51ef13", "title": "NeuroDSP Accelerator for Face Detection Application", "year": 2015, "pdf": "http://doi.acm.org/10.1145/2742060.2743769"}, {"id": "1584edf8106e8f697f19b726e011b9717de0e4db", "title": "Subclass representation-based face-recognition algorithm derived from the structure scatter of training samples", "year": 2016, "pdf": "https://doi.org/10.1049/iet-cvi.2015.0350"}, {"id": "d458c49a5e34263c95b3393386b5d76ba770e497", "title": "A Comparative Analysis of Gender Classification Techniques", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/d458/c49a5e34263c95b3393386b5d76ba770e497.pdf"}, {"id": "2f13dd8c82f8efb25057de1517746373e05b04c4", "title": "Evaluation of state-of-the-art algorithms for remote face recognition", "year": 2010, "pdf": "https://doi.org/10.1109/ICIP.2010.5652608"}, {"id": "85860d38c66a5cf2e6ffd6475a3a2ba096ea2920", "title": "Celeb-500K: A Large Training Dataset for Face Recognition", "year": "2018", "pdf": 
"http://doi.org/10.1109/ICIP.2018.8451704"}, {"id": "b97f694c2a111b5b1724eefd63c8d64c8e19f6c9", "title": "Group Affect Prediction Using Multimodal Distributions", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.01216.pdf"}, {"id": "6fa7a1c8a858157deee3b582099e5e234798bb4a", "title": "Semi-supervised evaluation of face recognition in videos", "year": 2013, "pdf": "http://doi.acm.org/10.1145/2501105.2501107"}, {"id": "72da7e3cf1136dd0c916f9e966937da0e26c64b6", "title": "Continuous biometric authentication using Possibilistic C-Means", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8491508"}, {"id": "189a839c708f95772ccaad72bfb4d0321d1535d6", "title": "An efficient fusion method of distance metric learning and random forests distance for image verification", "year": 2014, "pdf": null}, {"id": "108b2581e07c6b7ca235717c749d45a1fa15bb24", "title": "Using Stereo Matching with General Epipolar Geometry for 2D Face Recognition across Pose", "year": 2009, "pdf": "http://www.cs.umd.edu/~djacobs/pubs_files/TPAMI_Proofs.pdf"}, {"id": "3dc522a6576c3475e4a166377cbbf4ba389c041f", "title": "The iNaturalist Challenge 2017 Dataset", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3dc5/22a6576c3475e4a166377cbbf4ba389c041f.pdf"}, {"id": "cb38b4a5e517b4bcb00efbb361f4bdcbcf1dca2c", "title": "Learning towards Minimum Hyperspherical Energy", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.09298.pdf"}, {"id": "62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4", "title": "Noise-resistant network: a deep-learning method for face recognition under noise", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/62d1/a31b8acd2141d3a994f2d2ec7a3baf0e6dc4.pdf"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "133f01aec1534604d184d56de866a4bd531dac87", "title": "Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics", "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230"}, {"id": "3e9ab40e6e23f09d16c852b74d40264067ac6abc", "title": "Learning Locally-Adaptive Decision Functions for Person Verification", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619307"}, {"id": "4526992d4de4da2c5fae7a5ceaad6b65441adf9d", "title": "System for Medical Mask Detection in the Operating Room Through Facial Attributes", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4526/992d4de4da2c5fae7a5ceaad6b65441adf9d.pdf"}, {"id": "2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02", "title": "Just Noticeable Differences in Visual Attributes", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410635"}, {"id": "0e4fa61871755b5548a5c970c8103f7b2ada24f3", "title": "Partial Face Recognition Based on Template Matching", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.19"}, {"id": "e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69", "title": "Human Activity Recognition Based on Wearable Sensor Data: A Standardization of the State-of-the-Art", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05226.pdf"}, {"id": "02e133aacde6d0977bca01ffe971c79097097b7f", "title": "Convolutional Neural Fabrics", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/02e1/33aacde6d0977bca01ffe971c79097097b7f.pdf"}, {"id": "85ccf2c9627a988ebab7032d0ec2d76ec7832c98", "title": "Multi-view face 
detection using Normalized Pixel Difference feature", "year": 2017, "pdf": null}, {"id": "f3495bf7f7d827c72cc4e7a4850eaf54a998db11", "title": "Trends and Controversies", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423530"}, {"id": "0a511058edae582e8327e8b9d469588c25152dc6", "title": "Memory Constrained Face Recognition Ashish Kapoor", "year": "", "pdf": "http://pdfs.semanticscholar.org/0a51/1058edae582e8327e8b9d469588c25152dc6.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "176d9121e4e645344de4706dfb345ad456bfb84a", "title": "Efficient algorithm for sparse coding and dictionary learning with applications to face recognition", "year": 2015, "pdf": "https://doi.org/10.1117/1.JEI.24.2.023009"}, {"id": "75249ebb85b74e8932496272f38af274fbcfd696", "title": "Face Identification in Large Galleries", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/7524/9ebb85b74e8932496272f38af274fbcfd696.pdf"}, {"id": "641f0989b87bf7db67a64900dcc9568767b7b50f", "title": "Reconstructing faces from their signatures using RBF regression", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/e25a/6836e5f5dc6cf691cd9c42224c0f7f4bb42c.pdf"}, {"id": "f6f06be05981689b94809130e251f9e4bf932660", "title": "An Approach to Illumination and Expression Invariant Multiple Classifier Face Recognition", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/fa86/ec19c1aec46202e0df12d209eb8062d53f7b.pdf"}, {"id": "ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d", "title": "Improving face verification in photo albums by combining facial recognition and metadata with cross-matching", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/001e/ad9b99ee57af44e1831be1670c40711d348d.pdf"}, {"id": "f4373f5631329f77d85182ec2df6730cbd4686a9", "title": "Recognizing Gender from Human Facial Regions using Genetic Algorithm", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f437/3f5631329f77d85182ec2df6730cbd4686a9.pdf"}, {"id": "15136c2f94fd29fc1cb6bedc8c1831b7002930a6", "title": "Deep Learning Architectures for Face Recognition in Video Surveillance", "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09990.pdf"}, {"id": "cb2470aade8e5630dcad5e479ab220db94ecbf91", "title": "Exploring Facial Differences in European Countries Boundary by Fine-Tuned Neural Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018"}, {"id": "d44a93027208816b9e871101693b05adab576d89", "title": "On the Capacity of Face Representation", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.10433.pdf"}, {"id": "dc3dc18b6831c867a8d65da130a9ff147a736745", "title": "Face Recognition on Drones: Issues and Limitations", "year": "2015", "pdf": "http://dl.acm.org/citation.cfm?id=2750679"}, {"id": "2faa09413162b0a7629db93fbb27eda5aeac54ca", "title": "Quantifying how lighting and focus affect face recognition performance", "year": 2010, "pdf": "https://doi.org/10.1109/CVPRW.2010.5543228"}, {"id": "26fcefb80af66391e07e6239933de943c1cddc6e", "title": "Pose, illumination and expression invariant pairwise face-similarity measure via Doppelgänger list comparison", "year": 2011, "pdf": null}, {"id": "fdb33141005ca1b208a725796732ab10a9c37d75", "title": "A connectionist computational method for face recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/fdb3/3141005ca1b208a725796732ab10a9c37d75.pdf"}, {"id": "40b10e330a5511a6a45f42c8b86da222504c717f", 
"title": "Implementing the Viola-Jones Face Detection Algorithm", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/40b1/0e330a5511a6a45f42c8b86da222504c717f.pdf"}, {"id": "6ca6ade6c9acb833790b1b4e7ee8842a04c607f7", "title": "Deep Transfer Network for Unconstrained Face Verification", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3234805"}, {"id": "23120f9b39e59bbac4438bf4a8a7889431ae8adb", "title": "Improved RGB-D-T based face recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2312/0f9b39e59bbac4438bf4a8a7889431ae8adb.pdf"}, {"id": "5905b4610389cd3b11a3a1ce06c05fee36a97f86", "title": "Unconstrained Face Recognition Using a Set-to-Set Distance Measure on Deep Learned Features", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936556"}, {"id": "a532cfc69259254192aee3fc5be614d9197e7824", "title": "Joint and collaborative representation with local adaptive convolution feature for face recognition with single sample per person", "year": "2017", "pdf": "http://doi.org/10.1016/j.patcog.2016.12.028"}, {"id": "474b461cd12c6d1a2fbd67184362631681defa9e", "title": "Multi-resolution fusion of DTCWT and DCT for shift invariant face recognition", "year": 2014, "pdf": "http://toc.proceedings.com/24478webtoc.pdf"}, {"id": "aca728cab26b95fbe04ec230b389878656d8af5b", "title": "Knowledge Computing and its Applications", "year": "2018", "pdf": "http://doi.org/10.1007/978-981-10-8258-0"}, {"id": "9806d3dc7805dd8c9c20d7222c915fc4beee7099", "title": "Self-Stimulatory Behaviours in the Wild for Autism Diagnosis", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6755972"}, {"id": "6332a99e1680db72ae1145d65fa0cccb37256828", "title": "MASTER IN COMPUTER VISION AND ARTIFICIAL INTELLIGENCE REPORT OF THE RESEARCH PROJECT OPTION: COMPUTER VISION Pose and Face Recovery via Spatio-temporal GrabCut Human Segmentation", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/6332/a99e1680db72ae1145d65fa0cccb37256828.pdf"}, {"id": "1d53aebe67d0e088e2da587fd6b08c8e8ed7f45c", "title": "A Selection Module for Large-Scale Face Recognition Systems", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/1d53/aebe67d0e088e2da587fd6b08c8e8ed7f45c.pdf"}, {"id": "4f591e243a8f38ee3152300bbf42899ac5aae0a5", "title": "Understanding Higher-Order Shape via 3D Shape Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4f59/1e243a8f38ee3152300bbf42899ac5aae0a5.pdf"}, {"id": "b5857b5bd6cb72508a166304f909ddc94afe53e3", "title": "SSIG and IRISA at Multimodal Person Discovery", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/b585/7b5bd6cb72508a166304f909ddc94afe53e3.pdf"}, {"id": "4180978dbcd09162d166f7449136cb0b320adf1f", "title": "Real-time head pose classification in uncontrolled environments with Spatio-Temporal Active Appearance Models", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/4180/978dbcd09162d166f7449136cb0b320adf1f.pdf"}, {"id": "2cb5db4df50921d276ad9e7186119a276324e465", "title": "Subtasks of Unconstrained Face Recognition", "year": 2014, "pdf": "http://cbcl.mit.edu/projects/cbcl/publications/ps/Leibo_Liao_Poggio_VISAPP_2014.pdf"}, {"id": "934efd61b20f5b8b151a2df7cd373f0b387c02b0", "title": "Photo rating of facial pictures based on image segmentation", "year": 2014, "pdf": "https://doi.org/10.5220/0004673003290336"}, {"id": "3176ee88d1bb137d0b561ee63edf10876f805cf0", "title": "Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation", "year": "2016", "pdf": "https://arxiv.org/pdf/1511.07356.pdf"}, 
{"id": "464de30d3310123644ab81a1f0adc51598586fd2", "title": "Covariance descriptor based on bio-inspired features for person re-identification and face verification", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/464d/e30d3310123644ab81a1f0adc51598586fd2.pdf"}, {"id": "4b4106614c1d553365bad75d7866bff0de6056ed", "title": "Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf"}, {"id": "01c4cf9c7c08f0ad3f386d88725da564f3c54679", "title": "Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors (TCAV)", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/01c4/cf9c7c08f0ad3f386d88725da564f3c54679.pdf"}, {"id": "0faf441a1ef1e788fb9ccd20484b104a1fa95ee8", "title": "A brief review on techniques for recognizing images under varying poses", "year": 2015, "pdf": null}, {"id": "04616814f1aabe3799f8ab67101fbaf9fd115ae4", "title": "UNIVERSIT\u00c9 DE CAEN BASSE NORMANDIE U . F . R . de Sciences", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/0461/6814f1aabe3799f8ab67101fbaf9fd115ae4.pdf"}, {"id": "0db8e6eb861ed9a70305c1839eaef34f2c85bbaf", "title": "Towards Large-Pose Face Frontalization in the Wild", "year": 2017, "pdf": "https://arxiv.org/pdf/1704.06244v1.pdf"}, {"id": "06560d5721ecc487a4d70905a485e22c9542a522", "title": "Deep Facial Attribute Detection in the Wild: From General to Specific", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/0656/0d5721ecc487a4d70905a485e22c9542a522.pdf"}, {"id": "927ad0dceacce2bb482b96f42f2fe2ad1873f37a", "title": "Interest-Point based Face Recognition System", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/927a/d0dceacce2bb482b96f42f2fe2ad1873f37a.pdf"}, {"id": "4f623e3821d14553b3b286e20910db9225fb723f", "title": "Audio-Visual Person Recognition in Multimedia Data From the Iarpa Janus Program", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122"}, {"id": "524c25217a6f1ed17f47871e947a5581d775fa56", "title": "Fusing global and local features for face verification", "year": 2013, "pdf": "https://doi.org/10.1117/12.2030875"}, {"id": "c20b2d365186f4471950fbe1ef8755de90efc000", "title": "Face verification with fully dynamic size blocks based on landmark detection", "year": 2014, "pdf": null}, {"id": "7bdcd85efd1e3ce14b7934ff642b76f017419751", "title": "Learning Discriminant Face Descriptor", "year": 2014, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/Lei-DFD-PAMI-14.pdf"}, {"id": "259706f1fd85e2e900e757d2656ca289363e74aa", "title": "Improving People Search Using Query Expansions: How Friends Help To Find People", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/6f98/3e8f26066f2ea486f6653b87154360d948ca.pdf"}, {"id": "66a2c229ac82e38f1b7c77a786d8cf0d7e369598", "title": "A Probabilistic Adaptive Search System for Exploring the Face Space", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/66a2/c229ac82e38f1b7c77a786d8cf0d7e369598.pdf"}, {"id": "42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830", "title": "Coordinated Local Metric Learning", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Saxena_Coordinated_Local_Metric_ICCV_2015_paper.pdf"}, {"id": "c8adbe00b5661ab9b3726d01c6842c0d72c8d997", "title": "Deep Architectures for Face Attributes", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c8ad/be00b5661ab9b3726d01c6842c0d72c8d997.pdf"}, {"id": 
"06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "1da5fc63d66fbf750b0e15c5ef6d4274ca73cca1", "title": "Research on face recognition method based on deep learning in natural environment", "year": 2017, "pdf": null}, {"id": "82417d8ec8ac6406f2d55774a35af2a1b3f4b66e", "title": "Some Faces are More Equal than Others: Hierarchical Organization for Accurate and Efficient Large-Scale Identity-Based Face Retrieval", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/8241/7d8ec8ac6406f2d55774a35af2a1b3f4b66e.pdf"}, {"id": "040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d", "title": "Large-scale Bisample Learning on ID vs. Spot Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03018.pdf"}, {"id": "307a810d1bf6f747b1bd697a8a642afbd649613d", "title": "An affordable contactless security system access for restricted area", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/307a/810d1bf6f747b1bd697a8a642afbd649613d.pdf"}, {"id": "8798d2243e852be5285948a93abdef65751ccc47", "title": "Parameter learning for the belief rule base system in the residual life probability prediction of metalized film capacitor", "year": "2015", "pdf": "http://doi.org/10.1016/j.knosys.2014.09.006"}, {"id": "46c82cfadd9f885f5480b2d7155f0985daf949fc", "title": "3D Shape Attributes", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780537"}, {"id": "68c17aa1ecbff0787709be74d1d98d9efd78f410", "title": "Gender Classification from Face Images Using Mutual Information and Feature Fusion", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/68c1/7aa1ecbff0787709be74d1d98d9efd78f410.pdf"}, {"id": "610a4451423ad7f82916c736cd8adb86a5a64c59", "title": "A Survey on Search Based Face Annotation Using Weakly Labelled Facial Images", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/610a/4451423ad7f82916c736cd8adb86a5a64c59.pdf"}, {"id": "1d729693a888a460ee855040f62bdde39ae273af", "title": "Photorealistic Face De-Identification by Aggregating Donors' Face Components", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/9da1/91858f65fd99c9b204a6f68916711d4bd51b.pdf"}, {"id": "70f189798c8b9f2b31c8b5566a5cf3107050b349", "title": "The challenge of face recognition from digital point-and-shoot cameras", "year": 2013, "pdf": "http://www.cs.colostate.edu/~vision/pasc/docs/pasc2013_NISTIR_061013.pdf"}, {"id": "3965d61c4f3b72044f43609c808f8760af8781a2", "title": "Diverse Conditional Image Generation by Stochastic Regression with Latent Drop-Out Codes", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.01121.pdf"}, {"id": "cfbb2d32586b58f5681e459afd236380acd86e28", "title": "Improving alignment of faces for recognition", "year": 2011, "pdf": "https://doi.org/10.1109/ROSE.2011.6058545"}, {"id": "1b6394178dbc31d0867f0b44686d224a19d61cf4", "title": "EPML: Expanded Parts Based Metric Learning for Occlusion Robust Face Verification", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ca8e/5419fd570f19643425b24da801283b706fc1.pdf"}, {"id": "5b2cfee6e81ef36507ebf3c305e84e9e0473575a", "title": "GoDP: Globally Optimized Dual Pathway deep network architecture for facial landmark localization in-the-wild", "year": "2018", "pdf": "https://arxiv.org/pdf/1704.02402.pdf"}, {"id": "02a92b79391ddac0acef4f665b396f7f39ca2972", "title": "Fusing landmark-based features at kernel level for face recognition", "year": 2017, "pdf": 
"https://doi.org/10.1016/j.patcog.2016.10.021"}, {"id": "1056347fc5e8cd86c875a2747b5f84fd570ba232", "title": "Multi-Camera Action Dataset for Cross-Camera Action Recognition Benchmarking", "year": 2017, "pdf": "http://arxiv.org/pdf/1607.06408v1.pdf"}, {"id": "ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c", "title": "Soft Biometrics: Globally Coherent Solutions for Hair Segmentation and Style Recognition Based on Hierarchical MRFs", "year": 2017, "pdf": "https://doi.org/10.1109/TIFS.2017.2680246"}, {"id": "352a620f0b96a7e76b9195a7038d5eec257fd994", "title": "Kinship Classification through Latent Adaptive Subspace", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373823"}, {"id": "2c06781ba75d51f5246d65d1acf66ab182e9bde6", "title": "Open-set face recognition across look-alike faces in real-world scenarios", "year": 2017, "pdf": "https://doi.org/10.1016/j.imavis.2016.11.002"}, {"id": "1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5", "title": "Unrestricted pose-invariant face recognition by sparse dictionary matrix", "year": 2015, "pdf": "https://doi.org/10.1016/j.imavis.2015.01.007"}, {"id": "370e0d9b89518a6b317a9f54f18d5398895a7046", "title": "Cross-pollination of normalisation techniques from speaker to face authentication using Gaussian mixture models", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/370e/0d9b89518a6b317a9f54f18d5398895a7046.pdf"}, {"id": "282a3ee79a08486f0619caf0ada210f5c3572367", "title": "Accelerated Training for Massive Classification via Dynamic Class Selection", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf"}, {"id": "e69261094b118eb52ab370ab4d0c7158f51846e4", "title": "Deep Learning Based Approach for Gender Classification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8474919"}, {"id": "dd600e7d6e4443ebe87ab864d62e2f4316431293", "title": "Improving facial expression analysis using histograms of Log-Transformed Nonnegative Sparse Representation with a Spatial Pyramid Structure", "year": 2013, "pdf": "https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553774.pdf"}, {"id": "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "title": "Git Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08512.pdf"}, {"id": "fe108803ee97badfa2a4abb80f27fa86afd9aad9", "title": "Kernel discriminant transformation for image set-based face recognition", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/fe10/8803ee97badfa2a4abb80f27fa86afd9aad9.pdf"}, {"id": "3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e", "title": "FACE-GRAB: Face recognition with General Region Assigned to Binary operator", "year": 2010, "pdf": "http://www.wjscheirer.com/papers/wjs_cswb2010_grab.pdf"}, {"id": "be4faea0971ef74096ec9800750648b7601dda65", "title": "Feature Analysis of Unsupervised Learning for Multi-task Classification Using Convolutional Neural Network", "year": "2017", "pdf": "http://doi.org/10.1007/s11063-017-9724-1"}, {"id": "d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea", "title": "Face Recognition with Patterns of Oriented Edge Magnitudes", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/d03e/4e938bcbc25aa0feb83d8a0830f9cd3eb3ea.pdf"}, {"id": "a92c207031b0778572bf41803dba1a21076e128b", "title": "Unobtrusive Students' Engagement Analysis in Computer Science Laboratory Using Deep Learning Techniques", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433557"}, {"id": "f19ab817dd1ef64ee94e94689b0daae0f686e849", "title": 
"Blickrichtungsunabh\u00e4ngige Erkennung von Personen in Bild- und Tiefendaten", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/f19a/b817dd1ef64ee94e94689b0daae0f686e849.pdf"}, {"id": "553ec63f804e578edf221ab642a1f05612657c22", "title": "Learning pairwise image similarities for multi-classification using Kernel Regression Trees", "year": "2012", "pdf": "http://doi.org/10.1016/j.patcog.2011.09.028"}, {"id": "5de9670f72d10682bf2cb3156988346257e0489f", "title": "MetricFusion: Generalized metric swarm learning for similarity measure", "year": "2016", "pdf": "http://doi.org/10.1016/j.inffus.2015.12.004"}, {"id": "38f1fac3ed0fd054e009515e7bbc72cdd4cf801a", "title": "Finding Person Relations in Image Data of the Internet Archive", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08246.pdf"}, {"id": "3ede3ed28329bf48fbd06438a69c4f855bef003f", "title": "Large-scale geo-facial image analysis", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf"}, {"id": "28b9d92baea72ec665c54d9d32743cf7bc0912a7", "title": "Parametric temporal alignment for the detection of facial action temporal segments", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a7f8/b6bf6aa7a12773ad9bcf1d040d4d74d12493.pdf"}, {"id": "d59404354f84ad98fa809fd1295608bf3d658bdc", "title": "Face Synthesis from Visual Attributes via Sketch using Conditional VAEs and GANs", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d594/04354f84ad98fa809fd1295608bf3d658bdc.pdf"}, {"id": "6966d9d30fa9b7c01523425726ab417fd8428790", "title": "Exemplar-Based Face Parsing", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619291"}, {"id": "3802da31c6d33d71b839e260f4022ec4fbd88e2d", "title": "Deep Attributes for One-Shot Face Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3802/da31c6d33d71b839e260f4022ec4fbd88e2d.pdf"}, {"id": "b375db63742f8a67c2a7d663f23774aedccc84e5", "title": "Brain-Inspired Classroom Occupancy Monitoring on a Low-Power Mobile Platform", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.95"}, {"id": "d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d", "title": "Robust Face Recognition via Multimodal Deep Face Representation", "year": "2015", "pdf": "https://arxiv.org/pdf/1509.00244.pdf"}, {"id": "3a9681e2e07be7b40b59c32a49a6ff4c40c962a2", "title": "Comparing treatment means : overlapping standard errors , overlapping confidence intervals , and tests of hypothesis", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1c95/1714996c573b00e63878acdc48cdc4ddc183.pdf"}, {"id": "571b83f7fc01163383e6ca6a9791aea79cafa7dd", "title": "SeqFace: Make full use of sequence information for face recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.06524.pdf"}, {"id": "74c8116d647612e8cd20a2528eeed38f76d09126", "title": "Measuring measures for face sample quality", "year": 2011, "pdf": null}, {"id": "96ccd94151a348c9829ab1d943cb13e9e933952f", "title": "A face detector based on color and texture", "year": 2014, "pdf": null}, {"id": "49df381ea2a1e7f4059346311f1f9f45dd997164", "title": "Client-Specific Anomaly Detection for Face Presentation Attack Detection", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.00848.pdf"}, {"id": "230527d37421c28b7387c54e203deda64564e1b7", "title": "Person Re-identification: System Design and Evaluation Overview", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2305/27d37421c28b7387c54e203deda64564e1b7.pdf"}, {"id": "87e5b4d95f95a0975e855cf5ad402db7a3c64ff5", "title": 
"Local Novelty Detection in Multi-class Recognition Problems", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2015.113"}, {"id": "9854145f2f64d52aac23c0301f4bb6657e32e562", "title": "An Improved Face Verification Approach Based on Speedup Robust Features and Pairwise Matching", "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2013.57"}, {"id": "25127c2d9f14d36f03d200a65de8446f6a0e3bd6", "title": "Evaluating the Performance of Deep Supervised Auto Encoder in Single Sample Face Recognition Problem Using Kullback-leibler Divergence Sparsity Regularizer", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2512/7c2d9f14d36f03d200a65de8446f6a0e3bd6.pdf"}, {"id": "13f065d4e6dfe2a130bd64d73eee97d10d9f7d33", "title": "A Study of the Region Covariance Descriptor: Impact of Feature Selection and Image Transformations", "year": 2015, "pdf": "https://doi.org/10.1109/DICTA.2015.7371222"}, {"id": "8e3d0b401dec8818cd0245c540c6bc032f169a1d", "title": "McGan: Mean and Covariance Feature Matching GAN", "year": "2017", "pdf": "https://arxiv.org/pdf/1702.08398.pdf"}, {"id": "6d10beb027fd7213dd4bccf2427e223662e20b7d", "title": "User Adaptive and Context-Aware Smart Home Using Pervasive and Semantic Technologies", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6d10/beb027fd7213dd4bccf2427e223662e20b7d.pdf"}, {"id": "458677de7910a5455283a2be99f776a834449f61", "title": "Face Image Retrieval Using Facial Attributes By K-Means", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/4586/77de7910a5455283a2be99f776a834449f61.pdf"}, {"id": "c3285a1d6ec6972156fea9e6dc9a8d88cd001617", "title": "Extreme 3D Face Reconstruction: Seeing Through Occlusions", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.05083.pdf"}, {"id": "e988be047b28ba3b2f1e4cdba3e8c94026139fcf", "title": "Multi-Task Convolutional Neural Network for Pose-Invariant Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1702.04710.pdf"}, {"id": "f33bd953d2df0a5305fc8a93a37ff754459a906c", "title": "Deformable Models of Ears in-the-Wild for Alignment and Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961800"}, {"id": "76cd5e43df44e389483f23cb578a9015d1483d70", "title": "Face Verification from Depth using Privileged Information", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/76cd/5e43df44e389483f23cb578a9015d1483d70.pdf"}, {"id": "8e94ed0d7606408a0833e69c3185d6dcbe22bbbe", "title": "For your eyes only", "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2012.6163013"}, {"id": "12ba7c6f559a69fbfaacf61bfb2f8431505b09a0", "title": "DocFace+: ID Document to Selfie Matching", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.05620.pdf"}, {"id": "9660594e91ca3b37e573a0408f3a10f5107e443f", "title": "Fine-grained face verification: Dataset and baseline results", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139079"}, {"id": "c1482491f553726a8349337351692627a04d5dbe", "title": "When Follow is Just One Click Away: Understanding Twitter Follow Behavior in the 2016 U.S. 
Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf"}, {"id": "5121f42de7cb9e41f93646e087df82b573b23311", "title": "Classifying Online Dating Profiles on Tinder using FaceNet Facial Embeddings", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/5121/f42de7cb9e41f93646e087df82b573b23311.pdf"}, {"id": "08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7", "title": "Understanding Kin Relationships in a Photo", "year": 2012, "pdf": "http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf"}, {"id": "191674c64f89c1b5cba19732869aa48c38698c84", "title": "Face Image Retrieval Using Attribute - Enhanced Sparse Codewords", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1916/74c64f89c1b5cba19732869aa48c38698c84.pdf"}, {"id": "37179032085e710d1d62a1ba2e9c1f63bb4dde91", "title": "Soft Biometrics and Their Application in Person Recognition at a Distance", "year": 2014, "pdf": "http://eprints.soton.ac.uk/363288/1/tome%20tifs.pdf"}, {"id": "5e6ba16cddd1797853d8898de52c1f1f44a73279", "title": "Face Identification with Second-Order Pooling", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/5e6b/a16cddd1797853d8898de52c1f1f44a73279.pdf"}, {"id": "4d6ad0c7b3cf74adb0507dc886993e603c863e8c", "title": "Human Activity Recognition Based on Wearable Sensor Data : A Standardization of the State-ofthe-Art", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4d6a/d0c7b3cf74adb0507dc886993e603c863e8c.pdf"}, {"id": "d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e", "title": "A Lightened CNN for Deep Face Representation", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/d4e6/69d5d35fa0ca9f8d9a193c82d4153f5ffc4e.pdf"}, {"id": "59bfeac0635d3f1f4891106ae0262b81841b06e4", "title": "Face Verification Using the LARK Face Representation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/59bf/eac0635d3f1f4891106ae0262b81841b06e4.pdf"}, {"id": "2f0b8579829b3d4efdbc03c96821e33d7cc65e1d", "title": "Using a Deformation Field Model for Localizing Faces and Facial Points under Weak Supervision", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.472"}, {"id": "814b05113ba0397d236736f94c01e85bb034c833", "title": "Local receptive field constrained deep networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/814b/05113ba0397d236736f94c01e85bb034c833.pdf"}, {"id": "b7f7a4df251ff26aca83d66d6b479f1dc6cd1085", "title": "Handling missing weak classifiers in boosted cascade: application to multiview and occluded face detection", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/b7f7/a4df251ff26aca83d66d6b479f1dc6cd1085.pdf"}, {"id": "486a82f50835ea888fbc5c6babf3cf8e8b9807bc", "title": "Face Search at Scale: 80 Million Gallery", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/486a/82f50835ea888fbc5c6babf3cf8e8b9807bc.pdf"}, {"id": "2004afb2276a169cdb1f33b2610c5218a1e47332", "title": "Deep Convolutional Neural Network Used in Single Sample per Person Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2004/afb2276a169cdb1f33b2610c5218a1e47332.pdf"}, {"id": "198b6beb53e0e61357825d57938719f614685f75", "title": "Vaulted Verification: A Scheme for Revocable Face Recognition", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/198b/6beb53e0e61357825d57938719f614685f75.pdf"}, {"id": "34546ef7e6148d9a1fb42cfab5f0ce11c92c760a", "title": "Robust domain adaptation image classification via sparse and low rank representation", "year": 2015, "pdf": "https://doi.org/10.1016/j.jvcir.2015.09.005"}, {"id": 
"31a38fd2d9d4f34d2b54318021209fe5565b8f7f", "title": "Pose-Invariant Face Recognition Using Markov Random Fields", "year": 2013, "pdf": "http://www.umiacs.umd.edu/~huytho/papers/HoChellappa_TIP2013.pdf"}, {"id": "153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4", "title": "Overview of algorithms for face detection and tracking", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/153f/5ad54dd101f7f9c2ae17e96c69fe84aa9de4.pdf"}, {"id": "27da432cf2b9129dce256e5bf7f2f18953eef5a5", "title": "Face Recognition in Low Quality Images: A Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11519.pdf"}, {"id": "1742e6c347037d5d4ccbdf5c7a27dfbf0afedb91", "title": "A Unified Framework for Representation-Based Subspace Clustering of Out-of-Sample and Large-Scale Data", "year": 2016, "pdf": "http://www1.i2r.a-star.edu.sg/~htang/Unified_Framework_for_Subspace_Clustering-TNNLS.pdf"}, {"id": "41c97af4801ac302f09902aeec2af17b481563ab", "title": "Collaborative multi-view metric learning for visual classification", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552996"}, {"id": "140c95e53c619eac594d70f6369f518adfea12ef", "title": "Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf"}, {"id": "5157dde17a69f12c51186ffc20a0a6c6847f1a29", "title": "Evolutionary Cost-Sensitive Extreme Learning Machine", "year": 2017, "pdf": "http://arxiv.org/pdf/1505.04373v2.pdf"}, {"id": "e43a18384695ae0acc820171236a39811ec2cd58", "title": "Kin-Verification Model on FIW Dataset Using Multi-Set Learning and Local Features", "year": 2017, "pdf": null}, {"id": "7f533bd8f32525e2934a66a5b57d9143d7a89ee1", "title": "Audio-Visual Identity Grounding for Enabling Cross Media Search", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7f53/3bd8f32525e2934a66a5b57d9143d7a89ee1.pdf"}, {"id": "eb526174fa071345ff7b1fad1fad240cd943a6d7", "title": "Deeply vulnerable: a study of the robustness of face recognition to presentation attacks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/eb52/6174fa071345ff7b1fad1fad240cd943a6d7.pdf"}, {"id": "c0a8c0e6ccf9882969ba0eda0b898affa015437b", "title": "Waldo: An Adaptive Human Interface for Crowd Entity Resolution", "year": 2017, "pdf": "http://stanford.edu/~verroios/papers/waldo.pdf"}, {"id": "d1a43737ca8be02d65684cf64ab2331f66947207", "title": "IJB \u2013 S : IARPA Janus Surveillance Video Benchmark \u2217", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf"}, {"id": "5028c0decfc8dd623c50b102424b93a8e9f2e390", "title": "Revisiting Classifier Two-sample Tests", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5028/c0decfc8dd623c50b102424b93a8e9f2e390.pdf"}, {"id": "424259e9e917c037208125ccc1a02f8276afb667", "title": "Walk and Learn: Facial Attribute Representation Learning from Egocentric Video and Contextual Data", "year": 2016, "pdf": "http://arxiv.org/pdf/1604.06433v1.pdf"}, {"id": "cc47368fe303c6cbda38caf5ac0e1d1c9d7e2a52", "title": "University Classroom Attendance Based on Deep Learning", "year": 2017, "pdf": null}, {"id": "166186e551b75c9b5adcc9218f0727b73f5de899", "title": "Automatic Age and Gender Recognition in Human Face Image Dataset using Convolutional Neural Network System", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf"}, {"id": "653d19e64bd75648cdb149f755d59e583b8367e3", "title": "Decoupling 
\"when to update\" from \"how to update\"", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.02613.pdf"}, {"id": "71f07c95a2b039cc21854c602f29e5be053f2aba", "title": "A comparison of face and facial feature detectors based on the Viola\u2013Jones general object detection framework", "year": 2010, "pdf": "https://doi.org/10.1007/s00138-010-0250-7"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "406c5aeca71011fd8f8bd233744a81b53ccf635a", "title": "Scalable softmax loss for face verification", "year": 2017, "pdf": null}, {"id": "1287bfe73e381cc8042ac0cc27868ae086e1ce3b", "title": "Computational Mid-Level Vision: From Border Ownership to Categorical Object Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/1287/bfe73e381cc8042ac0cc27868ae086e1ce3b.pdf"}, {"id": "7897c8a9361b427f7b07249d21eb9315db189496", "title": "Feature selection via simultaneous sparse approximation for person specific face verification", "year": 2011, "pdf": "http://arxiv.org/abs/1102.2743"}, {"id": "2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd", "title": "Robust Face Recognition Using the Deep C2D-CNN Model Based on Decision-Level Fusion", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2f17/f6c460e02bd105dcbf14c9b73f34c5fb59bd.pdf"}, {"id": "af53ce0f3a039c685b754e1f704817e03e182412", "title": "Face recognition using LBP for personal image management system and its analysis", "year": 2011, "pdf": null}, {"id": "b49affdff167f5d170da18de3efa6fd6a50262a2", "title": "Linking Names and Faces : Seeing the Problem in Different Ways", "year": "2008", "pdf": "https://pdfs.semanticscholar.org/b49a/ffdff167f5d170da18de3efa6fd6a50262a2.pdf"}, {"id": "f73174cfcc5c329b63f19fffdd706e1df4cc9e20", "title": "Automatic Vehicle Detection and Driver Identification Framework for Secure Vehicle Parking", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FIT.2015.13"}, {"id": "4344ba6e33faaa616d01248368e66799548ca48b", "title": "Unsupervised joint face alignment with gradient correlation coefficient", "year": 2015, "pdf": "https://doi.org/10.1007/s10044-015-0474-2"}, {"id": "02601d184d79742c7cd0c0ed80e846d95def052e", "title": "Graphical Representation for Heterogeneous Face Recognition", "year": 2017, "pdf": "http://arxiv.org/abs/1503.00488"}, {"id": "833f6ab858f26b848f0d747de502127406f06417", "title": "Learning weighted similarity measurements for unconstrained face recognition", "year": 2009, "pdf": "http://mediatum.ub.tum.de/doc/980054/157447.pdf"}, {"id": "0857281a3b6a5faba1405e2c11f4e17191d3824d", "title": "Face recognition via edge-based Gabor feature representation for plastic surgery-altered images", "year": "2014", "pdf": "https://pdfs.semanticscholar.org/0857/281a3b6a5faba1405e2c11f4e17191d3824d.pdf"}, {"id": "0a88f5936528dcfdd27df886b07e62f2fd2072d0", "title": "Supervised Congealing for Simultaneous Face Normalization and Eye Localization", "year": 2009, "pdf": null}, {"id": "5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0", "title": "Reference Face Graph for Face Recognition", "year": 2014, "pdf": "http://www.cs.ucr.edu/~mkafai/papers/Paper_tifs2014.pdf"}, {"id": "fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6", "title": "Draft: Evaluation Guidelines for Gender Classification and Age Estimation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf"}, {"id": 
"c5ed62f2b57e03d0a2e0cf13772b216ffad30c19", "title": "Robustness of DR-LDP over PCANet for face analysis", "year": 2017, "pdf": null}, {"id": "37381718559f767fc496cc34ceb98ff18bc7d3e1", "title": "Harnessing Synthesized Abstraction Images to Improve Facial Attribute Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3738/1718559f767fc496cc34ceb98ff18bc7d3e1.pdf"}, {"id": "27a0a7837f9114143717fc63294a6500565294c2", "title": "Face Recognition in Unconstrained Environments: A Comparative Study", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/27a0/a7837f9114143717fc63294a6500565294c2.pdf"}, {"id": "b8b9cef0938975c5b640b7ada4e3dea6c06d64e9", "title": "Metric-Promoted Siamese Network for Gender Classification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.119"}, {"id": "51c7c5dfda47647aef2797ac3103cf0e108fdfb4", "title": "Cs 395t: Celebrity Look-alikes *", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/51c7/c5dfda47647aef2797ac3103cf0e108fdfb4.pdf"}, {"id": "9aab33ce8d6786b3b77900a9b25f5f4577cea461", "title": "Automatic Semantic Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961739"}, {"id": "224d0eee53c2aa5d426d2c9b7fa5d843a47cf1db", "title": "Probabilistic Elastic Matching for Pose Variant Face Verification", "year": 2013, "pdf": "http://www.ifp.illinois.edu/~jyang29/papers/CVPR13-PEM.pdf"}, {"id": "1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb", "title": "Face Alignment by Explicit Shape Regression", "year": 2012, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248015"}, {"id": "634541661d976c4b82d590ef6d1f3457d2857b19", "title": "Advanced Techniques for Face Recognition under Challenging Environments", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6345/41661d976c4b82d590ef6d1f3457d2857b19.pdf"}, {"id": "7cee0311e71dca540aaf3d87bef3a6c97ca39bc3", "title": "The performance of proposed deep residual learning network of images", "year": 2017, "pdf": null}, {"id": "3826e47f0572ab4d0fe34f0ed6a49aa8303e0428", "title": "Joint Alignment and Clustering via Low-Rank Representation", "year": 2013, "pdf": "https://doi.org/10.1109/ACPR.2013.66"}, {"id": "5506a1a1e1255353fde05d9188cb2adc20553af5", "title": "Dictionary Integration using 3D Morphable Face Models for Pose-invariant Collaborative-representation-based Classification", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ff69/cb49c8cb86d0afadbcfa0baa607d7065965a.pdf"}, {"id": "85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9", "title": "Combined model for detecting, localizing, interpreting and recognizing faces", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/8518/8c77f3b2de3a45f7d4f709b6ea79e36bd0d9.pdf"}, {"id": "b166ce267ddb705e6ed855c6b679ec699d62e9cb", "title": "Sample group and misplaced atom dictionary learning for face recognition", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b166/ce267ddb705e6ed855c6b679ec699d62e9cb.pdf"}, {"id": "bbf20adb59b7461e0d040e665bf64ae5f478eda0", "title": "Automated face swapping and its detection", "year": 2017, "pdf": null}, {"id": "9472338240929e1ed38e52e029dbfa85a42ae095", "title": "Memristive Fully Convolutional Network: An Accurate Hardware Image-Segmentor in Deep Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8471011"}, {"id": "b69ff748b1cc7da3843acdd7f1c33f0c0debf3f5", "title": "Automatic face recognition system based on the SIFT features", "year": "2015", "pdf": "http://doi.org/10.1016/j.compeleceng.2015.01.014"}, 
{"id": "3ebb0209d5e99b22c67e425a67a959f4db8d1f47", "title": "Subspace-Based Convolutional Network for Handwritten Character Recognition", "year": 2017, "pdf": "https://doi.org/10.1109/ICDAR.2017.173"}, {"id": "942fd0b406fe1d24b50d745cd31fd31220c78f0c", "title": "Discriminative graph regularized broad learning system for image recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s11432-017-9421-3"}, {"id": "86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663", "title": "Benchmarks for Cloud Robotics", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/8661/4c2d2f6ebcb9c600d4aef85fd6bf6eab6663.pdf"}, {"id": "66886997988358847615375ba7d6e9eb0f1bb27f", "title": "Prototype-Based Discriminative Feature Learning for Kinship Verification", "year": 2015, "pdf": "https://pdfs.semanticscholar.org/6688/6997988358847615375ba7d6e9eb0f1bb27f.pdf"}, {"id": "c808c784237f167c78a87cc5a9d48152579c27a4", "title": "Know You at One Glance: A Compact Vector Representation for Low-Shot Learning", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265437"}, {"id": "004d5491f673cd76150f43b0a0429214f5bfd823", "title": "Learning to name faces: a multimodal learning scheme for search-based face annotation", "year": 2013, "pdf": "http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp130-wang.pdf"}, {"id": "b0502dcc6df378ee3ddeefeeb1cc51a20e04f39b", "title": "Local-Gravity-Face (<italic>LG-face</italic>) for Illumination-Invariant and Heterogeneous Face Recognition", "year": 2016, "pdf": null}, {"id": "e97ba85a4550667b8a28f83a98808d489e0ff3bc", "title": "A Research on Fast Face Feature Points Detection on Smart Mobile Devices", "year": "2018", "pdf": "http://doi.org/10.1155/2018%2F9729014"}, {"id": "3cfbe1f100619a932ba7e2f068cd4c41505c9f58", "title": "A Realistic Simulation Tool for Testing Face Recognition Systems under Real-World Conditions", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/3cfb/e1f100619a932ba7e2f068cd4c41505c9f58.pdf"}, {"id": "b3e521baceadee36ac22b6a06266e8abd6a701f7", "title": "Occlusion-Aware 3D Morphable Models and an Illumination Prior for Face Image Analysis", "year": "2018", "pdf": "http://doi.org/10.1007/s11263-018-1064-8"}, {"id": "17de5a9ce09f4834629cd76b8526071a956c9c6d", "title": "Smart Parental Advisory: A Usage Control and Deep Learning-Based Framework for Dynamic Parental Control on Smart TV", "year": 2017, "pdf": "https://doi.org/10.1007/978-3-319-68063-7_8"}, {"id": "2042f1cacea262ec924f74994e49d5e87d9d0445", "title": "A survey of homeland security biometrics and forensics research", "year": 2016, "pdf": null}, {"id": "f1aa120fb720f6cfaab13aea4b8379275e6d40a2", "title": "InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/f1aa/120fb720f6cfaab13aea4b8379275e6d40a2.pdf"}, {"id": "20111924fbf616a13d37823cd8712a9c6b458cd6", "title": "Linear Regression Line based Partial Face Recognition", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/2011/1924fbf616a13d37823cd8712a9c6b458cd6.pdf"}, {"id": "5951e9e13ff99f97f301a336f24a14d80459c659", "title": "Joint Bayesian guided metric learning for end-to-end face verification", "year": 2018, "pdf": "https://doi.org/10.1016/j.neucom.2017.09.009"}, {"id": "e465f596d73f3d2523dbf8334d29eb93a35f6da0", "title": "On Face Segmentation, Face Swapping, and Face Perception", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e465/f596d73f3d2523dbf8334d29eb93a35f6da0.pdf"}, {"id": "a74251efa970b92925b89eeef50a5e37d9281ad0", "title": 
"Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization", "year": 2011, "pdf": "http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf"}, {"id": "5dd146912c2b2a313cea50acdcca3b4b54479142", "title": "Improving Human Action Recognition through Hierarchical Neural Network Classifiers", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489663"}, {"id": "dcb6f06631021811091ce691592b12a237c12907", "title": "SeaShips: A Large-Scale Precisely Annotated Dataset for Ship Detection", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8438999"}, {"id": "ef032afa4bdb18b328ffcc60e2dc5229cc1939bc", "title": "Attribute-enhanced metric learning for face retrieval", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ef03/2afa4bdb18b328ffcc60e2dc5229cc1939bc.pdf"}, {"id": "e724c9a69613bef36f67ae7ed6850b1942918804", "title": "Recognition of Faces Using Discriminative Features of LBP and HOG Descriptor in Varying Environment", "year": 2015, "pdf": null}, {"id": "972e044f69443dfc5c987e29250b2b88a6d2f986", "title": "Face model fitting with learned displacement experts and multi-band images", "year": "2011", "pdf": "http://doi.org/10.1134/S1054661811020738"}, {"id": "5be3cc1650c918da1c38690812f74573e66b1d32", "title": "Relative Parts: Distinctive Parts for Learning Relative Attributes", "year": 2014, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sandeep_Relative_Parts_Distinctive_2014_CVPR_paper.pdf"}, {"id": "c4c1fb882ae8b48c461e1f7c359ea3ea15da29fa", "title": "Gender classification using bayesian classifier with local binary patch features", "year": 2012, "pdf": null}, {"id": "d340a135a55ecf7506010e153d5f23155dcfa7e8", "title": "MAVI: An Embedded Device to Assist Mobility of Visually Impaired", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7884781"}, {"id": "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "title": "Age and gender classification using convolutional neural networks", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf"}, {"id": "e1b656c846a360d816a9f240499ec4f306897b98", "title": "Face Recognition and Spoofing Detection System Adapted To Visually-Impaired People", "year": 2016, "pdf": null}, {"id": "788a7b59ea72e23ef4f86dc9abb4450efefeca41", "title": "Robust Statistical Face Frontalization", "year": 2015, "pdf": "http://eprints.eemcs.utwente.nl/26840/01/Pantic_Robust_Statistical_Face_Frontalization.pdf"}, {"id": "4df34e0194faa27078832cb5078a2af6c9d0ea9b", "title": "Saliency Prediction in the Deep Learning Era: An Empirical Investigation", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.03716.pdf"}, {"id": "90d735cffd84e8f2ae4d0c9493590f3a7d99daf1", "title": "Recognition of Faces using Efficient Multiscale Local Binary Pattern and Kernel Discriminant Analysis in Varying Environment", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/90d7/35cffd84e8f2ae4d0c9493590f3a7d99daf1.pdf"}, {"id": "0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab", "title": "Multi-Directional Multi-Level Dual-Cross Patterns for Robust Face Recognition", "year": 2016, "pdf": "http://arxiv.org/pdf/1401.5311v1.pdf"}, {"id": "96b6f8ac898c8ef6b947c50bb66fe6b1e6f2fb11", "title": "Simultaneous classification of several features of a person\u2019s appearance using a deep convolutional neural network", "year": 2015, "pdf": null}, {"id": "1ffe20eb32dbc4fa85ac7844178937bba97f4bf0", "title": "Face Clustering: 
Representation and Pairwise Constraints", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.05067.pdf"}, {"id": "d878a67b2ef6a0a5dec72db15291f12419040ab1", "title": "Using web images as additional training resource for the discriminative generalized hough transform", "year": 2016, "pdf": "https://doi.org/10.1109/IPTA.2016.7821012"}, {"id": "9b1a70d6771547cbcf6ba646f8775614c0162aca", "title": "Combining feature extraction and expansion to improve classification based similarity learning", "year": 2017, "pdf": "https://doi.org/10.1016/j.patrec.2016.11.005"}, {"id": "7df277c37ac75851684f926fd3fb4daced3e79f8", "title": "Gaussian elliptical fitting based skin color modeling for human detection", "year": 2017, "pdf": null}, {"id": "43aa40eaa59244c233f83d81f86e12eba8d74b59", "title": "Fast pose invariant face recognition using super coupled multiresolution Markov Random Fields on a GPU", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/43aa/40eaa59244c233f83d81f86e12eba8d74b59.pdf"}, {"id": "67c703a864aab47eba80b94d1935e6d244e00bcb", "title": "Face Retrieval Based On Local Binary Pattern and Its Variants: A Comprehensive Study", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/67c7/03a864aab47eba80b94d1935e6d244e00bcb.pdf"}, {"id": "4d90d7834ae25ee6176c096d5d6608555766c0b1", "title": "Face and Body Association for Video-Based Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115"}, {"id": "6a16b91b2db0a3164f62bfd956530a4206b23fea", "title": "A Method for Real-Time Eye Blink Detection and Its Application", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/6a16/b91b2db0a3164f62bfd956530a4206b23fea.pdf"}, {"id": "90fb58eeb32f15f795030c112f5a9b1655ba3624", "title": "Face and Iris Recognition in a Video Sequence Using Dbpnn and Adaptive Hamming Distance", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/90fb/58eeb32f15f795030c112f5a9b1655ba3624.pdf"}, {"id": "5da740682f080a70a30dc46b0fc66616884463ec", "title": "Real-Time Head Pose Estimation Using Multi-variate RVM on Faces in the Wild", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5da7/40682f080a70a30dc46b0fc66616884463ec.pdf"}, {"id": "2c6e65d8ef8c17387b839ab6a82fb469117ae396", "title": "Application of deep learning to computer vision: A comprehensive study", "year": 2016, "pdf": null}, {"id": "9ca93ad6200bfa9dd814ac64bfb1044c3a0c01ce", "title": "Noise adaptive binary pattern for face image analysis", "year": 2015, "pdf": null}, {"id": "26b606ac6beb2977a7853b032416c23c7b36cb8a", "title": "Multiscale binarised statistical image features for symmetric unconstrained face matching", "year": 2014, "pdf": null}, {"id": "25866eb48b94e85fa675b1d393163d27ffd62ba6", "title": "Multiple feature subspaces analysis for single sample per person face recognition", "year": 2017, "pdf": null}, {"id": "3046baea53360a8c5653f09f0a31581da384202e", "title": "Deformable Face Alignment via Local Measurements and Global Constraints", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/3046/baea53360a8c5653f09f0a31581da384202e.pdf"}, {"id": "5b0ebb8430a04d9259b321fc3c1cc1090b8e600e", "title": "The One-Shot similarity kernel", "year": 2009, "pdf": "http://www.openu.ac.il/home/hassner/projects/Ossk/WolfHassnerTaigman_ICCV09.pdf"}, {"id": "5f01f14ca354266106d8aa1b07c45e8c9ac3e273", "title": "RFIW 2017: LPQ-SIEDA for Large Scale Kinship Verification", "year": 2017, "pdf": null}, {"id": "492f3def325296164cd32b80d19a591b72b480cd", "title": "Metric Learning", "year": "2015", "pdf": 
"https://pdfs.semanticscholar.org/492f/3def325296164cd32b80d19a591b72b480cd.pdf"}, {"id": "17aa78bd4331ef490f24bdd4d4cd21d22a18c09c", "title": "Appendix: Building high-level features using large scale unsupervised learning", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/17aa/78bd4331ef490f24bdd4d4cd21d22a18c09c.pdf"}, {"id": "eac6aee477446a67d491ef7c95abb21867cf71fc", "title": "A Survey of Sparse Representation: Algorithms and Applications", "year": "2015", "pdf": "https://arxiv.org/pdf/1602.07017.pdf"}, {"id": "aa577652ce4dad3ca3dde44f881972ae6e1acce7", "title": "Deep Attribute Networks", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/aa57/7652ce4dad3ca3dde44f881972ae6e1acce7.pdf"}, {"id": "21b16df93f0fab4864816f35ccb3207778a51952", "title": "Recognition of Static Gestures Applied to Brazilian Sign Language (Libras)", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2015.26"}, {"id": "d38b32d91d56b01c77ef4dd7d625ce5217c6950b", "title": "Unconstrained gender classification by multi-resolution LPQ and SIFT", "year": 2016, "pdf": null}, {"id": "351158e4481e3197bd63acdafd73a5df8336143b", "title": "Measuring Gender Bias in News Images", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/3511/58e4481e3197bd63acdafd73a5df8336143b.pdf"}, {"id": "3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b", "title": "Deep Value Networks Learn to Evaluate and Iteratively Refine Structured Outputs", "year": "2017", "pdf": "https://arxiv.org/pdf/1703.04363.pdf"}, {"id": "a55c0810e6c84f8e51953c0d8fd9971696d205f0", "title": "Unconstrained face verification assisted by pairwise visual pre-estimation on key facial points", "year": 2014, "pdf": null}, {"id": "1672becb287ae3eaece3e216ba37677ed045db55", "title": "Fully automatic face normalization and single sample face recognition in unconstrained environments", "year": 2016, "pdf": "https://doi.org/10.1016/j.eswa.2015.10.047"}, {"id": "9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6", "title": "Attribute Based Face Classification Using Support Vector Machine", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/9c78/1f7fd5d8168ddae1ce5bb4a77e3ca12b40b6.pdf"}, {"id": "55cad1f4943018459b761f89afd9292d347610f2", "title": "Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz", "year": "2017", "pdf": "https://arxiv.org/pdf/1712.02859.pdf"}, {"id": "df310591dfba9672252d693bc87da73c246749c9", "title": "Fusion of Holistic and Part Based Features for Gender Classification in the Wild", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/df31/0591dfba9672252d693bc87da73c246749c9.pdf"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "1280b35e4a20036fcfd82ee09f45a3fca190276f", "title": "Face Verification Based on Feature Transfer via PCA-SVM Framework", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/iThings-GreenCom-CPSCom-SmartData.2017.166"}, {"id": "8e33183a0ed7141aa4fa9d87ef3be334727c76c0", "title": "Robustness of Face Recognition to Image Manipulations", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8e33/183a0ed7141aa4fa9d87ef3be334727c76c0.pdf"}, {"id": "0750a816858b601c0dbf4cfb68066ae7e788f05d", "title": "CosFace: Large Margin Cosine Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.09414.pdf"}, {"id": "b2749caec0094e186d3ee850151c899b8508f47a", "title": "AVIUE — Artificial 
vision to improve the user experience", "year": 2013, "pdf": null}, {"id": "4563b46d42079242f06567b3f2e2f7a80cb3befe", "title": "VADANA: A dense dataset for facial image analysis", "year": 2011, "pdf": "http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf"}, {"id": "7ce03597b703a3b6754d1adac5fbc98536994e8f", "title": "On the Intrinsic Dimensionality of Face Representation", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7ce0/3597b703a3b6754d1adac5fbc98536994e8f.pdf"}, {"id": "f45d6a7bdb6741242da6192d18c97ac39e6308db", "title": "Person-specific face recognition in unconstrained environments: a combination of offline and online learning", "year": 2008, "pdf": "http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2008%5D%5Bfg%5DPerson-Specific%20Face%20Recognition%20in%20Unconstrained%20Environments%20a%20Combination%20of%20Offline%20and%20Online%20Learning.pdf"}, {"id": "3ed46ef5344927a30d71089ae203c9a9e35e4977", "title": "Face detection: A deep convolutional network method based on grouped facial part", "year": 2017, "pdf": null}, {"id": "f4fc77660665ae58993065c6a336367e9a6c85f7", "title": "Biview face recognition in the shape-texture domain", "year": 2013, "pdf": "https://doi.org/10.1016/j.patcog.2012.12.009"}, {"id": "4686bdcee01520ed6a769943f112b2471e436208", "title": "Fast search based on generalized similarity measure", "year": 2017, "pdf": "http://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0024-5?site=ipsjcva.springeropen.com"}, {"id": "f2da0ed6dcb86a8c0ddc8c13245272a726cec480", "title": "Facial recognition application for border control", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8489113"}, {"id": "684f5166d8147b59d9e0938d627beff8c9d208dd", "title": "Discriminative Block-Diagonal Representation Learning for Image Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.03548.pdf"}, {"id": "853bd61bc48a431b9b1c7cab10c603830c488e39", "title": "Learning Face Representation from Scratch", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf"}, {"id": "47e14fdc6685f0b3800f709c32e005068dfc8d47", "title": "Secure Face Matching Using Fully Homomorphic Encryption", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00577.pdf"}, {"id": "8964524580ea2cff41a6b5858b623788bbefb8a4", "title": "A simple and improvable method for face region extraction", "year": 2008, "pdf": null}, {"id": "e855856d4b61b6a732005418f543c49195cb1542", "title": "Novel Method for Eyeglasses Detection in Frontal Face Images", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/e855/856d4b61b6a732005418f543c49195cb1542.pdf"}, {"id": "c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "5c92355b2808621d237a89dc7b3faa5cdb990ab5", "title": "Dynamic Amelioration of Resolution Mismatches for Local Feature Based Identity Inference", "year": 2010, "pdf": "http://www.researchgate.net/profile/Brian_Lovell2/publication/236124723_Dynamic_Amelioration_of_Resolution_Mismatches_for_Local_Feature_Based_Identity_Inference/links/0fcfd50741a027e848000000.pdf"}, {"id": "da8d0855e7760e86fbec47a3cfcf5acd8c700ca8", "title": "F 2 ConText : How to Extract Holistic Contexts of Persons of Interest for Enhancing Exploratory Analysis", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/da8d/0855e7760e86fbec47a3cfcf5acd8c700ca8.pdf"}, {"id": "c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d", "title": 
"Modeling for part-based visual object detection based on local features", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/c8db/8764f9d8f5d44e739bbcb663fbfc0a40fb3d.pdf"}, {"id": "3d4d3f70352dc833e454a5756d682f27eca46e5d", "title": "Fast k-Nearest Neighbor Search for Face Identification Using Bounds of Residual Score", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.32"}, {"id": "cc3c273bb213240515147e8be68c50f7ea22777c", "title": "Gaining Insight Into Films Via Topic Modeling & Visualization", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/cc3c/273bb213240515147e8be68c50f7ea22777c.pdf"}, {"id": "4bc4a7c4142e8b37389fddd1e2338298b8b56e96", "title": "Confidence assessment of face recognition results", "year": 2015, "pdf": null}, {"id": "9d757c0fede931b1c6ac344f67767533043cba14", "title": "Search Based Face Annotation Using PCA and Unsupervised Label Refinement Algorithms", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/9d75/7c0fede931b1c6ac344f67767533043cba14.pdf"}, {"id": "dbe255d3d2a5d960daaaba71cb0da292e0af36a7", "title": "Evolutionary Cost-Sensitive Extreme Learning Machine", "year": "2017", "pdf": "https://arxiv.org/pdf/1505.04373.pdf"}, {"id": "0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f", "title": "Chimpanzee Faces in the Wild: Log-Euclidean CNNs for Predicting Identities and Attributes of Primates", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/0de4/0e8adc31a15af7496c92f261f9f703afed1d.pdf"}, {"id": "22137ce9c01a8fdebf92ef35407a5a5d18730dde", "title": "Recognition of Faces from single and Multi-View Videos", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2213/7ce9c01a8fdebf92ef35407a5a5d18730dde.pdf"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "90e56a8515c8c2ff16f5c79c69811e283be852c7", "title": "Boosting face recognition via neural Super-Resolution", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/90e5/6a8515c8c2ff16f5c79c69811e283be852c7.pdf"}, {"id": "87f285782d755eb85d8922840e67ed9602cfd6b9", "title": "Incorporating Boltzmann Machine Priors for Semantic Labeling in Images and Videos", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/87f2/85782d755eb85d8922840e67ed9602cfd6b9.pdf"}, {"id": "06f146dfcde10915d6284981b6b84b85da75acd4", "title": "Scalable Face Image Retrieval Using Attribute-Enhanced Sparse Codewords", "year": 2013, "pdf": "http://cmlab.csie.ntu.edu.tw/~sirius42/papers/tmm12.pdf"}, {"id": "3dda181be266950ba1280b61eb63ac11777029f9", "title": "When Celebrities Endorse Politicians: Analyzing the Behavior of Celebrity Followers in the 2016 U.S. 
Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf"}, {"id": "d2baa43471d959075fc4c93485643cbd009797fd", "title": "Low-Power Convolutional Neural Network Processor for a Face-Recognition System", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/MM.2017.4241350"}, {"id": "96fc15d01a202446179546a5bea8106a414232a7", "title": "Computer Vision", "year": "2014", "pdf": "http://doi.org/10.1007/978-0-387-31439-6"}, {"id": "5dd3c9ac3c6d826e17c5b378d1575b68d02432d7", "title": "A survey on soft Biometrics and their application in person recognition at a distance", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7292416"}, {"id": "13fd0a4d06f30a665fc0f6938cea6572f3b496f7", "title": "Regularized Extreme Learning Machine for Large-scale Media Content Analysis", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/13fd/0a4d06f30a665fc0f6938cea6572f3b496f7.pdf"}, {"id": "89272b78b651038ff4d294b9ccca0018d2c9033b", "title": "Low Computation Face Verification Using Class Center Analysis", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.777"}, {"id": "6b35b15ceba2f26cf949f23347ec95bbbf7bed64", "title": "RSILC: Rotation- and Scale-Invariant, Line-based Color-aware descriptor", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/6b35/b15ceba2f26cf949f23347ec95bbbf7bed64.pdf"}, {"id": "a6d47f7aa361ab9b37c7f3f868280318f355fadc", "title": "Features and methods for improving large scale face recognition", "year": "2015", "pdf": "https://ora.ox.ac.uk/objects/uuid:7704244a-b327-4e5c-a58e-7bfe769ed988"}, {"id": "4d6c3a3f9410ca35eb3389ec7088f5e2c16ec3ea", "title": "Static facial expression analysis in tough conditions: Data, evaluation protocol and benchmark", "year": 2011, "pdf": "http://www.researchgate.net/profile/Roland_Goecke/publication/221429947_Static_facial_expression_analysis_in_tough_conditions_Data_evaluation_protocol_and_benchmark/links/0fcfd50e81697312d6000000.pdf"}, {"id": "178a82e3a0541fa75c6a11350be5bded133a59fd", "title": "BioHDD: a dataset for studying biometric identification on heavily degraded data", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/178a/82e3a0541fa75c6a11350be5bded133a59fd.pdf"}, {"id": "ac206a97e981df4514dcae28442beaea31845f35", "title": "A robust system for eye state recognition using the Hamming distances of eye image intensities", "year": 2017, "pdf": null}, {"id": "7918698ffa86cdd6123bc2f1f613be1ab38c0d2f", "title": "Learning to Recognize Faces in Realistic Conditions", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/7918/698ffa86cdd6123bc2f1f613be1ab38c0d2f.pdf"}, {"id": "fdaf65b314faee97220162980e76dbc8f32db9d6", "title": "Face recognition using both visible light image and near-infrared image and a deep network", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/fdaf/65b314faee97220162980e76dbc8f32db9d6.pdf"}, {"id": "68bf34e383092eb827dd6a61e9b362fcba36a83a", "title": "Multi-view, High-resolution Face Image Analysis", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/68bf/34e383092eb827dd6a61e9b362fcba36a83a.pdf"}, {"id": "e692870efb009da4b9316678b354ae935fdf48eb", "title": "An Efficient and Robust Gender Classification System", "year": 2015, "pdf": null}, {"id": "f1280f76933ba8b7f4a6b8662580504f02bb4ab6", "title": "Gender Classification by Deep Learning on Millions of Weakly Labelled Images", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703"}, {"id": 
"6bb95a0f3668cd36407c85899b71c9fe44bf9573", "title": "Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf"}, {"id": "2dd6c988b279d89ab5fb5155baba65ce4ce53c1e", "title": "Learning deformable shape manifolds", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/2dd6/c988b279d89ab5fb5155baba65ce4ce53c1e.pdf"}, {"id": "68484ae8a042904a95a8d284a7f85a4e28e37513", "title": "Spoofing Deep Face Recognition with Custom Silicone Masks", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/6848/4ae8a042904a95a8d284a7f85a4e28e37513.pdf"}, {"id": "d78077a7aa8a302d4a6a09fb9737ab489ae169a6", "title": "Robust face recognition with structural binary gradient patterns", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d780/77a7aa8a302d4a6a09fb9737ab489ae169a6.pdf"}, {"id": "b558be7e182809f5404ea0fcf8a1d1d9498dc01a", "title": "Bottom-up and top-down reasoning with convolutional latent-variable models", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/dc8a/57827ffbe7064979638cf909abf7fcf7fb8d.pdf"}, {"id": "1d776bfe627f1a051099997114ba04678c45f0f5", "title": "Deployment of Customized Deep Learning based Video Analytics On Surveillance Cameras", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10604.pdf"}, {"id": "51348e24d2199b06273e7b65ae5f3fc764a2efc7", "title": "Scalable $k$-NN graph construction", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c4b4/cbc801a4430be5fdd16ae34c68f53f772582.pdf"}, {"id": "a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3", "title": "Efficient Recurrent Residual Networks Improved by Feature Transfer", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a4a0/b5f08198f6d7ea2d1e81bd97fea21afe3fc3.pdf"}, {"id": "a98316980b126f90514f33214dde51813693fe0d", "title": "Collaborations on YouTube: From Unsupervised Detection to the Impact on Video and Channel Popularity", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01887.pdf"}, {"id": "c3fb2399eb4bcec22723715556e31c44d086e054", "title": "Face recognition based on SIGMA sets of image features", "year": 2014, "pdf": "https://doi.org/10.1109/ICASSP.2014.6853646"}, {"id": "0ee661a1b6bbfadb5a482ec643573de53a9adf5e", "title": "On the Use of Discriminative Cohort Score Normalization for Unconstrained Face Recognition", "year": 2014, "pdf": "http://epubs.surrey.ac.uk/812523/1/yunlian_TIFS2014.pdf"}, {"id": "edcb662834aae8878a209c769ed664f8bd48b751", "title": "Imagining the Unimaginable Faces by Deconvolutional Networks", "year": 2018, "pdf": null}, {"id": "7a85b3ab0efb6b6fcb034ce13145156ee9d10598", "title": "Inter-image outliers and their application to image classification", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/7a85/b3ab0efb6b6fcb034ce13145156ee9d10598.pdf"}, {"id": "5bae9822d703c585a61575dced83fa2f4dea1c6d", "title": "MOTChallenge 2015: Towards a Benchmark for Multi-Target Tracking", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5bae/9822d703c585a61575dced83fa2f4dea1c6d.pdf"}, {"id": "9649a19b49607459cef32f43db4f6e6727080bdb", "title": "Offset Neural Network for Document Orientation Identification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395207"}, {"id": "5fff61302adc65d554d5db3722b8a604e62a8377", "title": "Additive Margin Softmax for Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.05599.pdf"}, {"id": "0341405252c80ff029a0d0065ca46d0ade943b03", "title": "A Coupled Encoder-Decoder Network for Joint Face Detection 
and Landmark Localization", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.40"}, {"id": "3c086601ce0bac61047b5b931b253bd4035e1e7a", "title": "Occlusion handling in feature point tracking using ranked parts based models", "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7350897"}, {"id": "7f3a73babe733520112c0199ff8d26ddfc7038a0", "title": "Robust Face Identification with Small Sample Sizes using Bag of Words and Histogram of Oriented Gradients", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/7f3a/73babe733520112c0199ff8d26ddfc7038a0.pdf"}, {"id": "48853c25dc75481b0c77f408a8a76383287ebe2a", "title": "Facial landmark detection in uncontrolled conditions", "year": 2011, "pdf": "https://doi.org/10.1109/IJCB.2011.6117477"}, {"id": "4c19690889fb3a12ec03e65bae6f5f20420b4ba4", "title": "Robust facial landmark detection using mixture of discriminative visibility-aware models", "year": 2016, "pdf": "https://doi.org/10.1049/iet-ipr.2015.0699"}, {"id": "b84ccf1c07c6d2061c8aadaca3dfc4e7d41cc1c9", "title": "Dynamic Feature Matching for Partial Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8467369"}, {"id": "f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7", "title": "Face Verification via Class Sparsity Based Supervised Encoding", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2569436"}, {"id": "878301453e3d5cb1a1f7828002ea00f59cbeab06", "title": "Faceness-Net: Face Detection through Deep Facial Part Responses", "year": "2018", "pdf": "https://arxiv.org/pdf/1701.08393.pdf"}, {"id": "ec00ecb64fa206cea8b2e716955a738a96424084", "title": "Intelligent Synthesis Driven Model Calibration: Framework and Face Recognition Application", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265512"}, {"id": "765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d", "title": "Cross-Generating GAN for Facial Identity Preserving", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373821"}, {"id": "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "title": "Cross-Database Evaluation of Normalized Raw Pixels for Gender Recognition under Unconstrained Settings", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.542"}, {"id": "24a20ebfe86859e0d91c2b44188f115b58ba8d9c", "title": "The Dark Side of the Face: Exploring the Ultraviolet Spectrum for Face Biometrics", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411220"}, {"id": "26727dc7347e3338d22e8cf6092e3a3c7568d763", "title": "Discriminative low-rank metric learning for face recognition", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163088"}, {"id": "1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f", "title": "A novel binary adaptive weight GSA based feature selection for face recognition using local gradient patterns, modified census transform, and local binary patterns", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/1a93/37d70a87d0e30966ecd1d7a9b0bbc7be161f.pdf"}, {"id": "2c92839418a64728438c351a42f6dc5ad0c6e686", "title": "Pose-Aware Face Recognition in the Wild", "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf"}, {"id": "fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46", "title": "Feature Selection via Sparse Approximation for Face Recognition", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/fcf8/bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46.pdf"}, {"id": 
"0b82bf595e76898993ed4f4b2883c42720c0f277", "title": "Improving Face Recognition by Exploring Local Features with Visual Attention", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229"}, {"id": "5a8f96f6906af8fbf73810b88c68b84a31555f60", "title": "Iterative Grassmannian optimization for robust image alignment", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6403/7f1a3a104c4545fafe006f24d3452c007662.pdf"}, {"id": "f834c50e249c9796eb7f03da7459b71205dc0737", "title": "Enhanced Patterns of Oriented Edge Magnitudes for Face Recognition and Image Matching", "year": 2012, "pdf": "https://doi.org/10.1109/TIP.2011.2166974"}, {"id": "404776aa18031828f3d5dbceed39907f038a47fe", "title": "Sparsely encoded local descriptor for face verification", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/4047/76aa18031828f3d5dbceed39907f038a47fe.pdf"}, {"id": "03f7041515d8a6dcb9170763d4f6debd50202c2b", "title": "Clustering Millions of Faces by Identity", "year": 2018, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf"}, {"id": "1b4b3d0ce900996a6da8928e16370e21d15ed83e", "title": "A Review of Performance Evaluation on 2D Face Databases", "year": 2017, "pdf": "https://doi.org/10.1109/BigDataService.2017.38"}, {"id": "c81ee278d27423fd16c1a114dcae486687ee27ff", "title": "Search Based Face Annotation Using Weakly Labeled Facial Images", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/c81e/e278d27423fd16c1a114dcae486687ee27ff.pdf"}, {"id": "32743e72cdb481b7a30a3d81a96569dcbea4e409", "title": "Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for Mobile and Embedded Applications", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00361.pdf"}, {"id": "a9af0dc1e7a724464d4b9d174c9cf2441e34d487", "title": "Gabor-scale binary pattern for face recognition", "year": 2016, "pdf": "https://doi.org/10.1142/S0219691316500351"}, {"id": "ada73060c0813d957576be471756fa7190d1e72d", "title": "VRPBench: A Vehicle Routing Benchmark Tool", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ada7/3060c0813d957576be471756fa7190d1e72d.pdf"}, {"id": "47e8db3d9adb79a87c8c02b88f432f911eb45dc5", "title": "MAGMA: Multilevel Accelerated Gradient Mirror Descent Algorithm for Large-Scale Convex Composite Minimization", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/5f99/63990ab7dd888ab33393f712f8d5c1463348.pdf"}, {"id": "1da83903c8d476c64c14d6851c85060411830129", "title": "Iterated Support Vector Machines for Distance Metric Learning", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/90c3/b003b85bd60ae06630bcef6abc03c3b1ef96.pdf"}, {"id": "2fce767ad830e0203d62ce30bbe75213b959d19c", "title": "Histogram of Log-Gabor Magnitude Patterns for face recognition", "year": 2014, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p519-yi.pdf"}, {"id": "1b27ca161d2e1d4dd7d22b1247acee5c53db5104", "title": "Facial soft biometric features for forensic face recognition.", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1b27/ca161d2e1d4dd7d22b1247acee5c53db5104.pdf"}, {"id": "fbd7d591e6eecb9a947e377d5b1a865a9f86a11f", "title": "Consensual and Privacy-Preserving Sharing of Multi-Subject and Interdependent Data", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/fbd7/d591e6eecb9a947e377d5b1a865a9f86a11f.pdf"}, {"id": "62c435bc714f13a373926e3b1914786592ed1fef", "title": "MAVI: An Embedded Device to Assist Mobility of Visually Impaired", "year": 2017, "pdf": 
"http://assistech.iitd.ernet.in/mavi-embedded-device.pdf"}, {"id": "dc13229afbbc8b7a31ed5adfe265d971850c0976", "title": "Learning from Millions of 3 D Scans for Large-scale 3 D Face Recognition", "year": "2017", "pdf": null}, {"id": "234c106036964131c0f2daf76c47ced802652046", "title": "Adaptive facial point detection and emotion recognition for a humanoid robot", "year": "2015", "pdf": "http://doi.org/10.1016/j.cviu.2015.07.007"}, {"id": "0a85bdff552615643dd74646ac881862a7c7072d", "title": "Beyond frontal faces: Improving Person Recognition using multiple cues", "year": 2015, "pdf": "https://doi.org/10.1109/CVPR.2015.7299113"}, {"id": "ad27d13d163757b65110f98a0e7dd7f5bc8c8030", "title": "Experiments on the LFW database using curvelet transforms and a random forest-kNN cascade", "year": 2012, "pdf": null}, {"id": "b28346f6a962c6bbe309c891cfe04c90b97c1fc4", "title": "Iterative online subspace learning for robust image alignment", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553759"}, {"id": "97d811ae99bcbcf9f63c2f447041ab6d74a20b1e", "title": "Face recognition using truncated transform domain feature extraction", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/97d8/11ae99bcbcf9f63c2f447041ab6d74a20b1e.pdf"}, {"id": "dbc04694ef17c83bb12b3ad34da6092eab68ae68", "title": "Modeling cognitive deficits following neurodegenerative diseases and traumatic brain injuries with deep convolutional neural networks.", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/dbc0/4694ef17c83bb12b3ad34da6092eab68ae68.pdf"}, {"id": "4ddd55a9f103001da8dc24d123d9223dbb67f884", "title": "Combining Face and Facial Feature Detectors for Face Detection Performance Improvement", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/4ddd/55a9f103001da8dc24d123d9223dbb67f884.pdf"}, {"id": "5302df3216856b0c54267455078c206948c8d545", "title": "14.6 A 0.62mW ultra-low-power convolutional-neural-network face-recognition processor and a CIS integrated with always-on haar-like face detector", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7870354"}, {"id": "eb32aa2988fdfdc8656f9f31b35ed4d52110b039", "title": "Constrained Energy Minimization for Matching-Based Image Recognition", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597150"}, {"id": "9ec9a80b1c9ee6450f4419f01e457bb87d91bd5e", "title": "Optimized projection for Collaborative Representation based Classification and its applications to face recognition", "year": 2016, "pdf": "https://doi.org/10.1016/j.patrec.2016.01.012"}, {"id": "c9c2de3628be7e249722b12911bebad84b567ce6", "title": "Age and gender recognition in the wild with deep attention", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.06.028"}, {"id": "6b6493551017819a3d1f12bbf922a8a8c8cc2a03", "title": "Pose Normalization for Local Appearance-Based Face Recognition", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/6b64/93551017819a3d1f12bbf922a8a8c8cc2a03.pdf"}, {"id": "0b440695c822a8e35184fb2f60dcdaa8a6de84ae", "title": "KinectFaceDB: A Kinect Database for Face Recognition", "year": "2014", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883"}, {"id": "11862b8d6e308127acd3ca0685eda6f0e88dd0a4", "title": "A multi-phase sparse probability framework via entropy minimization for single sample face recognition", "year": 2016, "pdf": "https://doi.org/10.1109/ICIP.2016.7533088"}, {"id": "195b61470720c7faa523e10e68d0c8d8f27d7c7a", "title": "Principal regression analysis", 
"year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995618"}, {"id": "0c069a870367b54dd06d0da63b1e3a900a257298", "title": "Weakly Supervised Learning of Foreground-Background Segmentation Using Masked RBMs", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/cdb8/36785579a4ea3d0eff26dbba8cf845a347d2.pdf"}, {"id": "1fc249ec69b3e23856b42a4e591c59ac60d77118", "title": "Evaluation of a 3D-aided pose invariant 2D face recognition system", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272729"}, {"id": "6a931e7b7475635f089dd33e8d9a2899ae963804", "title": "Unified convolutional neural network for direct facial keypoints detection", "year": "2018", "pdf": "http://doi.org/10.1007/s00371-018-1561-3"}, {"id": "09f5033e1e91dae1f7f31cba2b65bbff1d5f8ca3", "title": "Face Recognition Based on Densely Connected Convolutional Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8499078"}, {"id": "2f28db98e8250cff29bc64b569801c739036e4ef", "title": "Face recognition using modified deep learning neural network", "year": 2017, "pdf": null}, {"id": "17be95132dc3dc121822703e3c8476edd199a10f", "title": "Passive vision: The global webcam imaging network", "year": 2009, "pdf": "https://doi.org/10.1109/AIPR.2009.5466314"}, {"id": "fed8cc533037d7d925df572a440fd89f34d9c1fd", "title": "Simple Triplet Loss Based on Intra/Inter-Class Metric Learning for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194"}, {"id": "36c5421d477697a8692fe6a51ce62473e690c62f", "title": "Group Affect Prediction Using Emotion Heatmaps and Scene Information", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/36c5/421d477697a8692fe6a51ce62473e690c62f.pdf"}, {"id": "e64b683e32525643a9ddb6b6af8b0472ef5b6a37", "title": "Face Recognition and Retrieval in Video", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/e64b/683e32525643a9ddb6b6af8b0472ef5b6a37.pdf"}, {"id": "77c7d8012fe4179a814c1241a37a2256361bc1a4", "title": "BGP Face Retrieval Based on Coding Pyramid", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8530228"}, {"id": "c2d39d5a0f476f8fe06a1d8023301e3b3b45236f", "title": "Fast Face Detector Training Using Tailored Views", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751465"}, {"id": "2acd0c90d14bf5003975c5b2414400fb6e53cb44", "title": "Face pair matching with Local Zernike Moments and L2-Norm metric learning", "year": 2014, "pdf": "https://doi.org/10.1109/SIU.2014.6830531"}, {"id": "2d1f86e2c7ba81392c8914edbc079ac64d29b666", "title": "Deep Heterogeneous Feature Fusion for Template-Based Face Recognition", "year": 2017, "pdf": "https://doi.org/10.1109/WACV.2017.71"}, {"id": "d3d71a110f26872c69cf25df70043f7615edcf92", "title": "Learning Compact Feature Descriptor and Adaptive Matching Framework for Face Recognition", "year": 2015, "pdf": "https://www.cise.ufl.edu/~dihong/assets/07094272.pdf"}, {"id": "029b53f32079063047097fa59cfc788b2b550c4b", "title": "Continuous Conditional Neural Fields for Structured Regression", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/b71c/73fcae520f6a5cdbce18c813633fb3d66342.pdf"}, {"id": "0f9bf5d8f9087fcba419379600b86ae9e9940013", "title": "Hybrid human detection and recognition in surveillance", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0f9b/f5d8f9087fcba419379600b86ae9e9940013.pdf"}, {"id": "06fe63b34fcc8ff68b72b5835c4245d3f9b8a016", "title": "Learning semantic representations of objects and 
their parts", "year": 2013, "pdf": "https://doi.org/10.1007/s10994-013-5336-9"}, {"id": "258a2dad71cb47c71f408fa0611a4864532f5eba", "title": "Discriminative Optimization of Local Features for Face Recognition", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/258a/2dad71cb47c71f408fa0611a4864532f5eba.pdf"}, {"id": "f633d6dc02b2e55eb24b89f2b8c6df94a2de86dd", "title": "Face alignment by robust discriminative Hough voting", "year": "2016", "pdf": "http://doi.org/10.1016/j.patcog.2016.05.017"}, {"id": "30c96cc041bafa4f480b7b1eb5c45999701fe066", "title": "Discrete Cosine Transform Locality-Sensitive Hashes for Face Retrieval", "year": 2014, "pdf": "https://doi.org/10.1109/TMM.2014.2305633"}, {"id": "7384c39a2d084c93566b98bc4d81532b5ad55892", "title": "A Comparative Study of Face Landmarking Techniques", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/d0a5/0940a1bf951adaf22bd1fc72ea861b606cdb.pdf"}, {"id": "7480d8739eb7ab97c12c14e75658e5444b852e9f", "title": "MLBoost Revisited: A Faster Metric Learning Algorithm for Identity-Based Face Retrieval", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/cfe4/b03951be323394e6749f6a30b2ac9b924479.pdf"}, {"id": "cf6c59d359466c41643017d2c212125aa0ee84b2", "title": "Weakly-supervised deep self-learning for face recognition", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552983"}, {"id": "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "title": "Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks", "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1029-3"}, {"id": "aba9acb4a607071af10684f2cfbdefa0507a4e9a", "title": "Uncorrelated multi-set feature learning for color face recognition", "year": 2016, "pdf": "https://doi.org/10.1016/j.patcog.2016.06.010"}, {"id": "8b1f697d81de1245c283b4f8f055b9b76badfa66", "title": "Test Sample Oriented Dictionary Learning for Face Recognition", "year": 2016, "pdf": "https://doi.org/10.1142/S0218126616500171"}, {"id": "50f0c495a214b8d57892d43110728e54e413d47d", "title": "Pairwise support vector machines and their application to large scale problems", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/50f0/c495a214b8d57892d43110728e54e413d47d.pdf"}, {"id": "6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb", "title": "Low Resolution Face Recognition Using a Two-Branch Deep Convolutional Neural Network Architecture", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.06247.pdf"}, {"id": "e043d79f4dc41c9decaf637d8ffdd11f8ed59f2b", "title": "Distance metric learning for image and webpage comparison. 
(Apprentissage de distance pour la comparaison d'images et de pages Web)", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/d589/29d6cc1dfa513b145e47598c446b16487861.pdf"}, {"id": "8813368c6c14552539137aba2b6f8c55f561b75f", "title": "Trunk-Branch Ensemble Convolutional Neural Networks for Video-Based Face Recognition", "year": 2018, "pdf": "https://arxiv.org/pdf/1607.05427v1.pdf"}, {"id": "b8378ab83bc165bc0e3692f2ce593dcc713df34a", "title": "A 3D Approach to Facial Landmarks: Detection, Refinement, and Tracking", "year": 2014, "pdf": "http://cmp.felk.cvut.cz/ftp/articles/cech/Cech-ICPR-2014.pdf"}, {"id": "94a7c97d1e3eb5dbfb20b180780451486597a9be", "title": "Facial attributes for active authentication on mobile devices", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/94a7/c97d1e3eb5dbfb20b180780451486597a9be.pdf"}, {"id": "44078d0daed8b13114cffb15b368acc467f96351", "title": "Triplet probabilistic embedding for face verification and clustering", "year": 2016, "pdf": "http://arxiv.org/pdf/1604.05417v1.pdf"}, {"id": "38f1d8d25c0332798e0929594af2c43092d2c5c8", "title": "Face recognition via fast dense correspondence", "year": 2017, "pdf": null}, {"id": "2e5cfa97f3ecc10ae8f54c1862433285281e6a7c", "title": "Generative Adversarial Networks for Improving Face Classification", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2e5c/fa97f3ecc10ae8f54c1862433285281e6a7c.pdf"}, {"id": "021a19e240f0ae0554eff814e838e1e396be6572", "title": "Face alignment through subspace constrained mean-shifts", "year": 2009, "pdf": "http://ci2cv.net/static/papers/2009_ICCV_Saragih_2.pdf"}, {"id": "0ca295be89efd110327411d4aa52660bc0eb48c4", "title": "Pairwise Identity Verification via Linear Concentrative Metric Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7786904"}, {"id": "8c7bceba769762126fd3dae78d622908bb83c3d3", "title": "Facial landmark configuration for improved detection", "year": 2012, "pdf": "http://qil.uh.edu/qil/websitecontent/pdf/2015-33.pdf"}, {"id": "19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9", "title": "FaceNet: A unified embedding for face recognition and clustering", "year": 2015, "pdf": "https://arxiv.org/pdf/1503.03832v2.pdf"}, {"id": "4e97b53926d997f451139f74ec1601bbef125599", "title": "Discriminative Regularization for Generative Models", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4e97/b53926d997f451139f74ec1601bbef125599.pdf"}, {"id": "6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe", "title": "A novel rank order LoG filter for interest point detection", "year": 2012, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0000937.pdf"}, {"id": "46a29a5026142c91e5655454aa2c2f122561db7f", "title": "Margin Emphasized Metric Learning and its application to Gabor feature based face recognition", "year": 2011, "pdf": "https://doi.org/10.1109/FG.2011.5771461"}, {"id": "6f6ce988a13ac08071a0e3349f80b7c8adc7a49d", "title": "A detailed survey on 2D and 3D still face and face video databases part I", "year": 2014, "pdf": null}, {"id": "09e7578833f13a1f91d7a95b71a159af4e38a305", "title": "Convolutional neural networks based on multi-scale additive merging layers for visual smoke recognition", "year": "2018", "pdf": "http://doi.org/10.1007/s00138-018-0990-3"}, {"id": "f65b47093e4d45013f54c3ba09bbcce7140af6bb", "title": "Multiple Anthropological Fisher Kernel Framework and Its Application to Kinship Verification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354117"}, {"id": 
"dd8084b2878ca95d8f14bae73e1072922f0cc5da", "title": "Model Distillation with Knowledge Transfer in Face Classification, Alignment and Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.02929.pdf"}, {"id": "5a1255d65e8309131638b3eb94aad5c52ab3629a", "title": "Improving Open Source Face Detection by Combining an Adapted Cascade Classification Pipeline and Active Learning", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/5a12/55d65e8309131638b3eb94aad5c52ab3629a.pdf"}, {"id": "581e920ddb6ecfc2a313a3aa6fed3d933b917ab0", "title": "Automatic Mapping of Remote Crowd Gaze to Stimuli in the Classroom", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/581e/920ddb6ecfc2a313a3aa6fed3d933b917ab0.pdf"}, {"id": "1a2431e3b35a4a4794dc38ef16e9eec2996114a1", "title": "Automated Face Recognition: Challenges and Solutions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1a24/31e3b35a4a4794dc38ef16e9eec2996114a1.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "04bb3fa0824d255b01e9db4946ead9f856cc0b59", "title": "Maximum A Posteriori Estimation of Distances Between Deep Features in Still-to-Video Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c1de/db5ac05c955e53d7ef1f6367fb7badea49b1.pdf"}, {"id": "91e17338a12b5e570907e816bff296b13177971e", "title": "Towards open-set face recognition using hashing functions", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751"}, {"id": "d2b2b56dd8c1daa61152595caf759a62596a85c9", "title": "Revocable and Non-Invertible Multibiometric Template Protection based on Matrix Transformation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d2b2/b56dd8c1daa61152595caf759a62596a85c9.pdf"}, {"id": "6005a30bf103164fe3410185976b6b8b36537aca", "title": "Communication, Networks and Computing", "year": "2018", "pdf": "http://doi.org/10.1007/978-981-13-2372-0"}, {"id": "9b000ccc04a2605f6aab867097ebf7001a52b459", "title": "PCANet: An energy perspective", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/9b00/0ccc04a2605f6aab867097ebf7001a52b459.pdf"}, {"id": "72ef87fb1a49f0e386f123a6b4f5566f51a3a47d", "title": "Minimizing Latency for Secure Coded Computing Using Secret Sharing via Staircase Codes", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/72ef/87fb1a49f0e386f123a6b4f5566f51a3a47d.pdf"}, {"id": "1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee", "title": "Deep fusion of visual signatures for client-server facial analysis", "year": 2016, "pdf": "https://arxiv.org/pdf/1611.00142v2.pdf"}, {"id": "fb228b214e28af26f77cc1195d03c9d851b78ec6", "title": "Facial asymmetry versus facial makeup", "year": 2018, "pdf": null}, {"id": "177d03c5851f7082cb023a20fa8a2cd1dfb59467", "title": "Difference networks and second-order difference networks", "year": 2017, "pdf": null}, {"id": "a856449c724f958dbb2f0629228d26a322153ba3", "title": "Face Mask Extraction in Video Sequence", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09207.pdf"}, {"id": "e49372992c31412f55579397e615610748b6e6c9", "title": "Online Robust Image Alignment via Subspace Learning from Gradient Orientations", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237457"}, {"id": "2edab301935d1e1faa0859897b617862d4fede63", "title": "Robust and efficient face recognition via low-rank supported extreme learning machine", "year": 2017, "pdf": null}, {"id": 
"0d5c01814f64f3be401efa4f6495d2677c16a8d8", "title": "Building an intelligent video and image analysis evaluation platform for public security", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078472"}, {"id": "d487142a7b8c84cb47129d91d5837345ccaa88eb", "title": "Face recognition using extended LBP features and multilevel SVM classifier", "year": 2017, "pdf": null}, {"id": "0b37ff3bb5e73bc4fd70877b2a39b27debdb83e2", "title": "Poster abstract: MicroBrain: Compressing deep neural networks for energy-efficient visual inference service", "year": 2017, "pdf": "https://doi.org/10.1109/INFCOMW.2017.8116530"}, {"id": "3e4bd583795875c6550026fc02fb111daee763b4", "title": "Convolutional Sketch Inversion", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/3e4b/d583795875c6550026fc02fb111daee763b4.pdf"}, {"id": "e0eb1d66f244456063409264ed795d9893565011", "title": "Inhibited Softmax for Uncertainty Estimation in Neural Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.01861.pdf"}, {"id": "de0eb358b890d92e8f67592c6e23f0e3b2ba3f66", "title": "Inference-Based Similarity Search in Randomized Montgomery Domains for Privacy-Preserving Biometric Identification", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.01587.pdf"}, {"id": "e86008f6aebd0ab26bdb69d2549b2e8454b8959c", "title": "A survey of deep face recognition in the wild", "year": 2016, "pdf": null}, {"id": "6c825dab1405d193628298adc552a4e4d17c7e69", "title": "On the guessability of binary biometric templates: A practical guessing entropy based approach", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272719"}, {"id": "a022eff5470c3446aca683eae9c18319fd2406d5", "title": "Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description s\u00e9mantique des traits visuels humains)", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf"}, {"id": "89c57aab49ceb3c2b939e61f44ecc93257b3ce1b", "title": "Robust face recognition based on weighted DeepFace", "year": 2017, "pdf": null}, {"id": "05db8e3a342f8f239203c24d496e809a65ca7f73", "title": "Learning Diverse Image Colorization", "year": 2017, "pdf": "https://arxiv.org/pdf/1612.01958v1.pdf"}, {"id": "c1f05b723e53ac4eb1133249b445c0011d42ca79", "title": "Deep Convolutional Neural Networks for Image Classification: A Comprehensive Review", "year": 2017, "pdf": "https://doi.org/10.1162/neco_a_00990"}, {"id": "1e8711d2fc4b05eac0699c82f4698154c2b057d3", "title": "The unreasonable effectiveness of small neural ensembles in high-dimensional brain", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07656.pdf"}, {"id": "85934ee572897ab2da4f294bced88a6531c2fdcc", "title": "A Vision and Speech Enabled, Customizable, Virtual Assistant for Smart Environments", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431232"}, {"id": "fab60b3db164327be8588bce6ce5e45d5b882db6", "title": "Maximum A Posteriori Estimation of Distances Between Deep Features in Still-to-Video Face Recognition", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/fab6/0b3db164327be8588bce6ce5e45d5b882db6.pdf"}, {"id": "acc5318592303852feba755a1202fb3c683b3b53", "title": "Correction of AI systems by linear discriminants: Probabilistic foundations", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.05321.pdf"}, {"id": "77362789d04db4c51be61eaffa4f43e03759e677", "title": "Fuzzy Analysis and Deep Convolution Neural Networks in Still-to-video Recognition", 
"year": 2018, "pdf": null}, {"id": "97a0aba4e9a95db17c3d4367f59aad1f02e04b55", "title": "How far did we get in face spoofing detection?", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.09868.pdf"}, {"id": "13831e47759e11f8cc6c77fa64ad34272b409b34", "title": "Compressing deep neural networks for efficient visual inference", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019465"}, {"id": "f16d6152e23b032f59eb794fac2d272daa859a79", "title": "A Face Recognition System Based on Local Binary Patterns and Support Vector Machine for Home Security Service Robot", "year": 2016, "pdf": null}, {"id": "0991634b6b7f5b5d7045fe859c24ccb46152251e", "title": "Real-time Video Summarization on Commodity Hardware", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3243689"}, {"id": "0b4d3e59a0107f0dad22e74054bab1cf1ad9c32e", "title": "Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations", "year": 2016, "pdf": "https://ir.cwi.nl/pub/25535/IJCV5595.pdf"}, {"id": "ff01bc3f49130d436fca24b987b7e3beedfa404d", "title": "Fuzzy System-Based Face Detection Robust to In-Plane Rotation Based on Symmetrical Characteristics of a Face", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ff01/bc3f49130d436fca24b987b7e3beedfa404d.pdf"}, {"id": "b7ec41005ce4384e76e3be854ecccd564d2f89fb", "title": "Granular Computing and Sequential Analysis of Deep Embeddings in Fast Still-to-Video Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009"}, {"id": "ca92c54ec18e72d3ba86f3b80d82974707882abb", "title": "Talk2Me: A Framework for Device-to-Device Augmented Reality Social Network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8444578"}, {"id": "59b11427853b7892a9f0d8ab6683d96ce59c2ff2", "title": "A Multi-Face Challenging Dataset for Robust Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.01898.pdf"}, {"id": "0077cd8f97cafd2b389783858a6e4ab7887b0b6b", "title": "Face Image Reconstruction from Deep Templates", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b971/266b29fcecf1d5efe1c4dcdc2355cb188ab0.pdf"}, {"id": "be72b20247fb4dc4072d962ced77ed89aa40372f", "title": "Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07718.pdf"}, {"id": "78250b9481690aeb2f558e69c9e782dad2bf90e3", "title": "Face recognition using a new compressive sensing-based feature extraction method", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-5007-0"}, {"id": "a40f8881a36bc01f3ae356b3e57eac84e989eef0", "title": "End-to-end semantic face segmentation with conditional random fields as convolutional, recurrent and adversarial networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a40f/8881a36bc01f3ae356b3e57eac84e989eef0.pdf"}, {"id": "0ee737085af468f264f57f052ea9b9b1f58d7222", "title": "SiGAN: Siamese Generative Adversarial Network for Identity-Preserving Face Hallucination", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08370.pdf"}, {"id": "8395cf3535a6628c3bdc9b8d0171568d551f5ff0", "title": "Entropy Non-increasing Games for the Improvement of Dataflow Programming", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf"}, {"id": "8377ac1b2dffb11cf48f456be2531c95d14aa6e5", "title": "Improving the Annotation of DeepFashion Images for Fine-grained Attribute Recognition", "year": "2018", 
"pdf": "https://arxiv.org/pdf/1807.11674.pdf"}, {"id": "68dbd8c2d61cfe7e3b896ab47850b95c3ce349a8", "title": "Low-rank constrained collaborative representation for robust face recognition", "year": 2017, "pdf": "https://doi.org/10.1109/MMSP.2017.8122266"}, {"id": "d4288daef6519f6852f59ac6b85e21b8910f2207", "title": "Recurrent Face Aging with Hierarchical AutoRegressive Memory.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994505"}, {"id": "5e0f8c355a37a5a89351c02f174e7a5ddcb98683", "title": "Microsoft COCO: Common Objects in Context", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/71b7/178df5d2b112d07e45038cb5637208659ff7.pdf"}, {"id": "5585f55bbfae39b032a24521ce65f905afd69a3e", "title": "Machine Learning VS Transfer Learning Smart Camera Implementation for Face Authentication", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3243710"}, {"id": "94826cb68980e3b89118569c93cfd36f3945fa99", "title": "Computer face-matching technology using two-dimensional photographs accurately matches the facial gestalt of unrelated individuals with the same syndromic form of intellectual disability", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2ea3/e84ab58ef37dc506ca4ef8b25edb4b170efd.pdf"}, {"id": "7fa4e972da46735971aad52413d17c4014c49e6e", "title": "How to Train Triplet Networks with 100K Identities?", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.02940.pdf"}, {"id": "727d03100d4a8e12620acd7b1d1972bbee54f0e6", "title": "von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04264.pdf"}, {"id": "e99718d08aca2c49cd2848eebdbb7c7855b4e484", "title": "Deep neural networks and maximum likelihood search for approximate nearest neighbor in video-based image recognition", "year": 2017, "pdf": null}, {"id": "20c6c93cef3d0417b750a9c56a9587acb93500a4", "title": "B-Face: 0.2 MW CNN-Based Face Recognition Processor with Face Alignment for Mobile User Identification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8502266"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/megaface.json b/site/datasets/unknown/megaface.json new file mode 100644 index 00000000..ee67dd63 --- /dev/null +++ b/site/datasets/unknown/megaface.json @@ -0,0 +1 @@ +{"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "paper": {"paper_id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "key": "megaface", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf", "address": "", "name": "MegaFace"}, "citations": [{"id": "d1a43737ca8be02d65684cf64ab2331f66947207", "title": "IJB \u2013 S : IARPA Janus Surveillance Video Benchmark \u2217", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf"}, {"id": "dc13229afbbc8b7a31ed5adfe265d971850c0976", "title": "Learning from Millions of 3 D Scans for Large-scale 3 D Face Recognition", "year": "2017", "pdf": null}, {"id": "57178b36c21fd7f4529ac6748614bb3374714e91", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217"}, {"id": "1345fb7700389f9d02f203b3cb25ac3594855054", "title": "Hierarchical Training for Large Scale Face Recognition with Few Samples Per Subject", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451561"}, {"id": "e4232e8fd566a7289ccb33f732c9093c9beb84a6", "title": "UHDB31: A Dataset for Better Understanding Face Recognition Across Pose and Illumination Variation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265511"}, {"id": "9a10845115794117485fc84f9b9e6ada2a7d2b00", "title": "Eye In-painting with Exemplar Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1712.03999.pdf"}, {"id": "57246142814d7010d3592e3a39a1ed819dd01f3b", "title": "Verification of Very Low-Resolution Faces Using An Identity-Preserving Deep Face Super-resolution Network", "year": "", "pdf": "https://pdfs.semanticscholar.org/5724/6142814d7010d3592e3a39a1ed819dd01f3b.pdf"}, {"id": "0a23bdc55fb0d04acdac4d3ea0a9994623133562", "title": "Large-scale Bisample Learning on ID vs. 
Spot Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03018.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "380dd0ddd5d69adc52defc095570d1c22952f5cc", "title": "Improving Smiling Detection with Race and Gender Diversity", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf"}, {"id": "3d85cf942efda695347c7d95485fcd1e6796ee3a", "title": "Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00112.pdf"}, {"id": "27da432cf2b9129dce256e5bf7f2f18953eef5a5", "title": "Face Recognition in Low Quality Images: A Survey", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11519.pdf"}, {"id": "b4f3e9fc0a2b40595ae0a625d1d768a57a7c2eba", "title": "Recognizing Disguised Faces in the Wild", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.08837.pdf"}, {"id": "7323b594d3a8508f809e276aa2d224c4e7ec5a80", "title": "An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05508.pdf"}, {"id": "c76251049b370f8258d6bbb944c696c30b8bbb85", "title": "Clothing Change Aware Person Identification", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575455"}, {"id": "cfd4004054399f3a5f536df71f9b9987f060f434", "title": "Person Recognition in Social Media Photos", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.03224.pdf"}, {"id": "9ea37d031a8f112292c0d0f8d731b837462714e9", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "f47518fcd69cdbb43dc88fe5259f4f4c61921313", "title": "A Compact Embedding for Facial Expression Similarity", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.11283.pdf"}, {"id": "2306b2a8fba28539306052764a77a0d0f5d1236a", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "1ffe20eb32dbc4fa85ac7844178937bba97f4bf0", "title": "Face Clustering: Representation and Pairwise Constraints", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.05067.pdf"}, {"id": "8de1c724a42d204c0050fe4c4b4e81a675d7f57c", "title": "Deep Face Recognition: A Survey", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8614364"}, {"id": "65984ea40c3b17bb8965c215b61972cd660f61a7", "title": "Doppelganger Mining for Face Representation Learning", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265436"}, {"id": "4739b47af26137ec52bbfb582e6f37e9e9f5aba0", "title": "Hard Example Mining with Auxiliary Embeddings", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575283"}, {"id": "1174b869c325222c3446d616975842e8d2989cf2", "title": "CosFace: Large Margin Cosine Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.09414.pdf"}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/28cd/46a078e8fad370b1aba34762a874374513a5.pdf"}, {"id": "75249ebb85b74e8932496272f38af274fbcfd696", "title": "Face Identification in Large Galleries", "year": "2016", "pdf": 
"https://pdfs.semanticscholar.org/7524/9ebb85b74e8932496272f38af274fbcfd696.pdf"}, {"id": "e11bc0f7c73c04d38b7fb80bd1ca886495a4d43c", "title": "\u201cA Leopard Cannot Change Its Spots\u201d: Improving Face Recognition Using 3D-Based Caricatures", "year": "2019", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306"}, {"id": "3827f1cab643a57e3cd22fbffbf19dd5e8a298a8", "title": "One-Shot Face Recognition via Generative Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373804"}, {"id": "d00787e215bd74d32d80a6c115c4789214da5edb", "title": "Faster and Lighter Online Sparse Dictionary Learning", "year": "", "pdf": "http://pdfs.semanticscholar.org/d007/87e215bd74d32d80a6c115c4789214da5edb.pdf"}, {"id": "0750a816858b601c0dbf4cfb68066ae7e788f05d", "title": "CosFace: Large Margin Cosine Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.09414.pdf"}, {"id": "e3d76f1920c5bf4a60129516abb4a2d8683e48ae", "title": "I Know That Person: Generative Full Body and Face De-identification of People in Images", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907"}, {"id": "48499deeaa1e31ac22c901d115b8b9867f89f952", "title": "Interim Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf"}, {"id": "d44a93027208816b9e871101693b05adab576d89", "title": "On the Capacity of Face Representation", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.10433.pdf"}, {"id": "a55dea7981ea0f90d1110005b5f5ca68a3175910", "title": "Are 1, 000 Features Worth A Picture? Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4db9/e5f19366fe5d6a98ca43c1d113dac823a14d.pdf"}, {"id": "9a42c519f0aaa68debbe9df00b090ca446d25bc4", "title": "Face Recognition via Centralized Coordinate Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf"}, {"id": "282a3ee79a08486f0619caf0ada210f5c3572367", "title": "Accelerated Training for Massive Classification via Dynamic Class Selection", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf"}, {"id": "ff01bc3f49130d436fca24b987b7e3beedfa404d", "title": "Fuzzy System-Based Face Detection Robust to In-Plane Rotation Based on Symmetrical Characteristics of a Face", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ff01/bc3f49130d436fca24b987b7e3beedfa404d.pdf"}, {"id": "8395cf3535a6628c3bdc9b8d0171568d551f5ff0", "title": "Entropy Non-increasing Games for the Improvement of Dataflow Programming", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "c607572fd2594ca83f732c9790fd590da9e69eb1", "title": "Comparative Evaluation of Deep Architectures for Face Recognition in Unconstrained Environment ( FRUE )", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/c607/572fd2594ca83f732c9790fd590da9e69eb1.pdf"}, {"id": 
"cb38b4a5e517b4bcb00efbb361f4bdcbcf1dca2c", "title": "Learning towards Minimum Hyperspherical Energy", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.09298.pdf"}, {"id": "8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b", "title": "Learning from Longitudinal Face Demonstration - Where Tractable Deep Modeling Meets Inverse Reinforcement Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10520.pdf"}, {"id": "18858cc936947fc96b5c06bbe3c6c2faa5614540", "title": "Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf"}, {"id": "459e840ec58ef5ffcee60f49a94424eb503e8982", "title": "One-shot Face Recognition by Promoting Underrepresented Classes", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/459e/840ec58ef5ffcee60f49a94424eb503e8982.pdf"}, {"id": "3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f", "title": "Enhancing convolutional neural networks for face recognition with occlusion maps and batch triplet loss", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.07923.pdf"}, {"id": "a2344004f0e1409c0c9473d071a5cfd74bff0a5d", "title": "Learnable PINs: Cross-modal Embeddings for Person Identity", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00833.pdf"}, {"id": "3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827", "title": "An Empirical Study of Face Recognition under Variations", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373845"}, {"id": "5fff61302adc65d554d5db3722b8a604e62a8377", "title": "Additive Margin Softmax for Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.05599.pdf"}, {"id": "dde5125baefa1141f1ed50479a3fd67c528a965f", "title": "Synthesizing Normalized Faces from Facial Identity Features", "year": "2017", "pdf": "https://arxiv.org/pdf/1701.04851.pdf"}, {"id": "1b4b3d0ce900996a6da8928e16370e21d15ed83e", "title": "A Review of Performance Evaluation on 2D Face Databases", "year": 2017, "pdf": "https://doi.org/10.1109/BigDataService.2017.38"}, {"id": "d31328b12eef33e7722b8e5505d0f9d9abe2ffd9", "title": "Deep Unsupervised Domain Adaptation for Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866"}, {"id": "6ca6ade6c9acb833790b1b4e7ee8842a04c607f7", "title": "Deep Transfer Network for Unconstrained Face Verification", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3234805"}, {"id": "d2eb1079552fb736e3ba5e494543e67620832c52", "title": "DeSTNet: Densely Fused Spatial Transformer Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.04050.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "0e8760fc198a7e7c9f4193478c0e0700950a86cd", "title": "Brute-Force Facial Landmark Analysis With A 140, 000-Way Classifier", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/0e87/60fc198a7e7c9f4193478c0e0700950a86cd.pdf"}, {"id": "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "title": "Minimum Margin Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06741.pdf"}, {"id": "6bb95a0f3668cd36407c85899b71c9fe44bf9573", "title": "Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf"}, {"id": "9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682", "title": "To Frontalize 
or Not to Frontalize: Do We Really Need Elaborate Pre-processing to Improve Face Recognition?", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113"}, {"id": "036fac2b87cf04c3d93e8a59da618d56a483a97d", "title": "Query Adaptive Late Fusion for Image Retrieval", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.13103.pdf"}, {"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "c924137ca87e8b4e1557465405744f8b639b16fc", "title": "Seeding Deep Learning using Wireless Localization", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.10242.pdf"}, {"id": "59fc69b3bc4759eef1347161e1248e886702f8f7", "title": "Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf"}, {"id": "fe7c0bafbd9a28087e0169259816fca46db1a837", "title": "Seeing Voices and Hearing Faces: Cross-modal biometric matching", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.00326.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "380d5138cadccc9b5b91c707ba0a9220b0f39271", "title": "Deep Imbalanced Learning for Face Recognition and Attribute Prediction", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00194.pdf"}, {"id": "03f7041515d8a6dcb9170763d4f6debd50202c2b", "title": "Clustering Millions of Faces by Identity", "year": 2018, "pdf": "http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf"}, {"id": "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "title": "A Light CNN for Deep Face Representation with Noisy Labels", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf"}, {"id": "b498640d8f0ac5a628563ff84dbef8d35d12a7ec", "title": "Overcoming catastrophic forgetting with hard attention to the task", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.01423.pdf"}, {"id": "91e17338a12b5e570907e816bff296b13177971e", "title": "Towards open-set face recognition using hashing functions", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751"}, {"id": "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "title": "Git Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08512.pdf"}, {"id": "ea079334121a0ba89452036e5d7f8e18f6851519", "title": "Unsupervised incremental learning of deep descriptors from video streams", "year": "2017", "pdf": "https://arxiv.org/pdf/1708.03615.pdf"}]}
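Each per-dataset JSON file added in this change (for example site/datasets/unknown/megaface.json above and site/datasets/unknown/morph.json below) follows the same shape: a top-level {"id", "paper", "citations"} object, where "paper" holds {"paper_id", "key", "title", "year", "pdf", "address", "name"} and each entry of "citations" holds {"id", "title", "year", "pdf"}. Note that "year" appears both as a number (2016) and as a string ("2018", occasionally ""), and "pdf" may be null. A minimal consumer sketch, assuming the hypothetical helpers loadDataset and citationsPerYear, which are not part of this change:

// Reads one dataset file and tallies its citations per year,
// normalizing the mixed number/string "year" values and skipping empty ones.
const fs = require('fs')

function loadDataset(path) {
  // e.g. path = 'site/datasets/unknown/megaface.json'
  return JSON.parse(fs.readFileSync(path, 'utf8'))
}

function citationsPerYear(dataset) {
  const counts = {}
  dataset.citations.forEach(citation => {
    const year = parseInt(citation.year, 10) // handles 2016 and "2018" alike
    if (Number.isNaN(year)) return           // some entries have "year": ""
    counts[year] = (counts[year] || 0) + 1
  })
  return counts
}

// Example usage:
// console.log(citationsPerYear(loadDataset('site/datasets/unknown/megaface.json')))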
\ No newline at end of file diff --git a/site/datasets/unknown/morph.json b/site/datasets/unknown/morph.json new file mode 100644 index 00000000..6da07f55 --- /dev/null +++ b/site/datasets/unknown/morph.json @@ -0,0 +1 @@ +{"id": "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "paper": {"paper_id": "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "key": "morph", "title": "MORPH: a longitudinal image database of normal adult age-progression", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78", "address": "", "name": "MORPH Commercial"}, "citations": [{"id": "8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125", "title": "Detecting and classifying scars, marks, and tattoos found in the wild", "year": 2012, "pdf": "https://www.wjscheirer.com/papers/wjs_btas2012_smt.pdf"}, {"id": "f1748303cc02424704b3a35595610890229567f9", "title": "Learning-based encoding with soft assignment for age estimation under unconstrained imaging conditions", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f174/8303cc02424704b3a35595610890229567f9.pdf"}, {"id": "7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35", "title": "Comparison of Three Different CNN Architectures for Age Classification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICSC.2017.61"}, {"id": "dca2bb023b076de1ccd0c6b8d71faeb3fccb3978", "title": "Joint Estimation of Age and Expression by Combining Scattering and Convolutional Networks", "year": 2018, "pdf": "http://doi.acm.org/10.1145/3152118"}, {"id": "8d8461ed57b81e05cc46be8e83260cd68a2ebb4d", "title": "Age identification of Facial Images using Neural Network", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8d84/61ed57b81e05cc46be8e83260cd68a2ebb4d.pdf"}, {"id": "e198a7b9e61dd19c620e454aaa81ae8f7377ade0", "title": "A hierarchical approach to facial aging", "year": 2010, "pdf": "https://doi.org/10.1109/CVPRW.2010.5543611"}, {"id": "42a5dc91852c8c14ed5f4c3b451c9dc98348bc02", "title": "A Data Augmentation Methodology to Improve Age Estimation Using Convolutional Neural Networks", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021"}, {"id": "1f3ae376b22136a2fe2e96632d4383653a42e4d4", "title": "A technique for face verification across age progression with large age gap", "year": 2017, "pdf": null}, {"id": "15e0b9ba3389a7394c6a1d267b6e06f8758ab82b", "title": "The OU-ISIR Gait Database comprising the Large Population Dataset with Age and performance evaluation of age estimation", "year": 2017, "pdf": "https://doi.org/10.1186/s41074-017-0035-2"}, {"id": "c1482491f553726a8349337351692627a04d5dbe", "title": "When Follow is Just One Click Away: Understanding Twitter Follow Behavior in the 2016 U.S. 
Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf"}, {"id": "37b6d6577541ed991435eaf899a2f82fdd72c790", "title": "Vision-based Human Gender Recognition: A Survey", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/37b6/d6577541ed991435eaf899a2f82fdd72c790.pdf"}, {"id": "7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e", "title": "Face Sketch Synthesis via Sparse Representation-Based Greedy Search", "year": 2015, "pdf": "http://jpinfotech.org/wp-content/plugins/infotech/file/upload/pdf/8962Face-Sketch-Synthesis-via-Sparse-Representation-Based-Greedy-Search-pdf.pdf"}, {"id": "1890470d07a090e7b762091c7b9670b5c2e1c348", "title": "Improving Random Forests by Correlation-Enhancing Projections and Sample-Based Sparse Discriminant Selection", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CRV.2016.20"}, {"id": "2c8743089d9c7df04883405a31b5fbe494f175b4", "title": "Real-time full-body human gender recognition in (RGB)-D data", "year": 2015, "pdf": "http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf"}, {"id": "b755505bdd5af078e06427d34b6ac2530ba69b12", "title": "NFRAD: Near-Infrared Face Recognition at a Distance", "year": 2011, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf"}, {"id": "97d1d561362a8b6beb0fdbee28f3862fb48f1380", "title": "Age Synthesis and Estimation via Faces: A Survey", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.36"}, {"id": "88691c3b74753a8bd67459896c8660deece9a2b0", "title": "Age-based human face image retrieval using zernike moments", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8515123"}, {"id": "5c435c4bc9c9667f968f891e207d241c3e45757a", "title": "\"How old are you?\" : Age Estimation with Tensors of Binary Gaussian Receptive Maps", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/eb6a/13c8a607dfc535e5f31b7c8843335674644c.pdf"}, {"id": "26c7eda262dfda1c3a3597a3bf1f2f1cc4013425", "title": "Some Like It Hot — Visual Guidance for Preference Prediction", "year": 2016, "pdf": null}, {"id": "289cfcd081c4393c7d6f63510747b5372202f855", "title": "Detecting Decision Ambiguity from Facial Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373873"}, {"id": "fcd3d69b418d56ae6800a421c8b89ef363418665", "title": "Effects of Aging over Facial Feature Analysis and Face Recognition", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/fcd3/d69b418d56ae6800a421c8b89ef363418665.pdf"}, {"id": "85387549277d6131dc8596ffacc7a21aeee0c6d1", "title": "Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.06647.pdf"}, {"id": "c62c07de196e95eaaf614fb150a4fa4ce49588b4", "title": "SSR-Net: A Compact Soft Stagewise Regression Network for Age Estimation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c62c/07de196e95eaaf614fb150a4fa4ce49588b4.pdf"}, {"id": "fa24bf887d3b3f6f58f8305dcd076f0ccc30272a", "title": "Interval Insensitive Loss for Ordinal Classification", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/fa24/bf887d3b3f6f58f8305dcd076f0ccc30272a.pdf"}, {"id": "4562ea84ebfc8d9864e943ed9e44d35997bbdf43", "title": "Small Sample Deep Learning for Newborn Gestational Age Estimation", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.19"}, {"id": "3dda181be266950ba1280b61eb63ac11777029f9", "title": "When 
Celebrities Endorse Politicians: Analyzing the Behavior of Celebrity Followers in the 2016 U.S. Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf"}, {"id": "7bc1e7d000ab517161a83b1fedf353e619516ddf", "title": "Age group classification via structured fusion of uncertainty-driven shape features and selected surface features", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836068"}, {"id": "13f03aab62fc29748114a0219426613cf3ba76ae", "title": "MORPH-II: Feature Vector Documentation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/13f0/3aab62fc29748114a0219426613cf3ba76ae.pdf"}, {"id": "437642cfc8c34e445ea653929e2d183aaaeeb704", "title": "Component Biologically Inspired Features with Moving Segmentation for Age Estimation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014815"}, {"id": "2042f1cacea262ec924f74994e49d5e87d9d0445", "title": "A survey of homeland security biometrics and forensics research", "year": 2016, "pdf": null}, {"id": "22bebedc1a5f3556cb4f577bdbe032299a2865e8", "title": "Effective training of convolutional neural networks for face-based gender and age prediction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf"}, {"id": "29db16efc3b378c50511f743e5197a4c0b9e902f", "title": "Deeply Learned Rich Coding for Cross-Dataset Facial Age Estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401"}, {"id": "083a2bc86e0984968b06593ba06654277b252f00", "title": "Neural evidence for the contribution of holistic processing but not attention allocation to the other-race effect on face memory.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/083a/2bc86e0984968b06593ba06654277b252f00.pdf"}, {"id": "3af28e9e9e883c235b6418a68bda519b08f9ae26", "title": "Implications of Adult Facial Aging on Biometrics", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3af2/8e9e9e883c235b6418a68bda519b08f9ae26.pdf"}, {"id": "3b1260d78885e872cf2223f2c6f3d6f6ea254204", "title": "Face Tracking and Recognition at a Distance: A Coaxial & Concentric PTZ Camera System", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/3b12/60d78885e872cf2223f2c6f3d6f6ea254204.pdf"}, {"id": "25337690fed69033ef1ce6944e5b78c4f06ffb81", "title": "Strategic Engagement Regulation: an Integration of Self-enhancement and Engagement", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2533/7690fed69033ef1ce6944e5b78c4f06ffb81.pdf"}, {"id": "09111da0aedb231c8484601444296c50ca0b5388", "title": "Joint estimation of age, gender and ethnicity: CCA vs. 
PLS", "year": 2013, "pdf": "https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553737.pdf"}, {"id": "893239f17dc2d17183410d8a98b0440d98fa2679", "title": "UvA-DARE ( Digital Academic Repository ) Expression-Invariant Age Estimation", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/d5b1/6481d34838cc92593f5f311badbf7f18ed5a.pdf"}, {"id": "9f49013657cbce384df9b16a2a17293bc4c9d967", "title": "PTZ camera assisted face acquisition, tracking & recognition", "year": 2010, "pdf": null}, {"id": "23d55061f7baf2ffa1c847d356d8f76d78ebc8c1", "title": "Generic and attribute-specific deep representations for maritime vessels", "year": 2017, "pdf": "https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0033-4?site=ipsjcva.springeropen.com"}, {"id": "11b904c9180686574e6047bbd9868c354ca46cb4", "title": "Mapping Dynamic Bayesian Networks to <formula formulatype=\"inline\"> <tex Notation=\"TeX\">$\\alpha$</tex></formula>-Shapes: Application to Human Faces Identification Across Ages", "year": 2012, "pdf": null}, {"id": "87b607b8d4858a16731144d17f457a54e488f15d", "title": "Cross-Age Face Recognition on a Very Large Database: The Performance versus Age Intervals and Improvement Using Soft Biometric Traits", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597532"}, {"id": "be7444c891caf295d162233bdae0e1c79791d566", "title": "Face Recognition Performance under Aging", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014816"}, {"id": "9e28243f047cc9f62a946bf87abedb65b0da0f0a", "title": "Can We Minimize the Influence Due to Gender and Race in Age Estimation?", "year": 2013, "pdf": "https://doi.org/10.1109/ICMLA.2013.141"}, {"id": "931f99bc6865d3d0c80c15d5b1c05338dfe98982", "title": "Identification of Aging Faces Using A-Stack Classification Model", "year": 2009, "pdf": null}, {"id": "93e962f8886eae13b02ad2aa98bdedfbd7e68709", "title": "Dual Conditional GANs for Face Aging and Rejuvenation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93e9/62f8886eae13b02ad2aa98bdedfbd7e68709.pdf"}, {"id": "1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6", "title": "Demographic Estimation from Face Images: Human vs. 
Machine Performance", "year": 2015, "pdf": "http://www.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf"}, {"id": "6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c", "title": "Ordinal Regression with Multiple Output CNN for Age Estimation", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532"}, {"id": "197eaa59a003a4c7cc77c1abe0f99d942f716942", "title": "Web image mining towards universal age estimator", "year": 2009, "pdf": "http://www.lv-nus.org/papers%5C2009%5C2009_mm_age.pdf"}, {"id": "892400017e5c93611dc8361e7749135520d66f25", "title": "A comparative study of age-invariant face recognition with different feature representations", "year": 2010, "pdf": "https://doi.org/10.1109/ICARCV.2010.5707394"}, {"id": "28aa89b2c827e5dd65969a5930a0520fdd4a3dc7", "title": "Characterization and Classification of Faces across Age Progression", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/28aa/89b2c827e5dd65969a5930a0520fdd4a3dc7.pdf"}, {"id": "80688e72b00013eabe57ce88be0c204d0b5aea2c", "title": "Semantic Face Signatures: Recognizing and Retrieving Faces by Verbal Descriptions", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078274"}, {"id": "b47a3c909ee9b099854619054fd00e200b944aa9", "title": "Deeply-Learned Feature for Age Estimation", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2015.77"}, {"id": "1e1e66783f51a206509b0a427e68b3f6e40a27c8", "title": "Semi-supervised Estimation of Perceived Age from Face Images", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/1e1e/66783f51a206509b0a427e68b3f6e40a27c8.pdf"}, {"id": "362ba8317aba71c78dafca023be60fb71320381d", "title": "Nighttime face recognition at large standoff: Cross-distance and cross-spectral matching", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/362b/a8317aba71c78dafca023be60fb71320381d.pdf"}, {"id": "4e8f301dbedc9063831da1306b294f2bd5b10477", "title": "Discriminating Power of FISWG Characteristic Descriptors Under Different Forensic Use Cases", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736919"}, {"id": "1135a818b756b057104e45d976546970ba84e612", "title": "Age, Gender, and Fine-Grained Ethnicity Prediction Using Convolutional Neural Networks for the East Asian Face Dataset", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.118"}, {"id": "3dce635ce4b55fb63fc6d41b38640403b152a048", "title": "The Impact of Age and Threshold Variation on Facial Recognition Algorithm Performance Using Images of Children", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411225"}, {"id": "3a4f522fa9d2c37aeaed232b39fcbe1b64495134", "title": "Face Recognition and Retrieval Using Cross-Age Reference Coding With Cross-Age Celebrity Dataset", "year": 2015, "pdf": "http://ijireeice.com/upload/2016/may-16/IJIREEICE%20101.pdf"}, {"id": "33554ff9d1d3b32f67020598320d3d761d7ec81f", "title": "Label Distribution Learning Forests", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3355/4ff9d1d3b32f67020598320d3d761d7ec81f.pdf"}, {"id": "621741b87258c745f8905d15ba81aaf2a8be60d2", "title": "A Study of Geometric Features for Aging Face Recognition", "year": 2010, "pdf": null}, {"id": "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "title": "Cross-Database Evaluation of Normalized Raw Pixels for Gender Recognition under Unconstrained Settings", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.542"}, {"id": "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "title": "Age group classification in the wild 
with deep RoR architecture", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296549"}, {"id": "1ae3a26a985fe525b23f080a9e1041ecff0509ad", "title": "A Comparative Study of Statistical Conversion of Face to Voice Based on Their Subjective Impressions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1ae3/a26a985fe525b23f080a9e1041ecff0509ad.pdf"}, {"id": "0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306", "title": "Review of Perceptual Resemblance of Local Plastic Surgery Facial Images using Near Sets", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0d6b/28691e1aa2a17ffaa98b9b38ac3140fb3306.pdf"}, {"id": "30b15cdb72760f20f80e04157b57be9029d8a1ab", "title": "Face Aging with Identity-Preserved Conditional Generative Adversarial Networks", "year": "", "pdf": "https://pdfs.semanticscholar.org/30b1/5cdb72760f20f80e04157b57be9029d8a1ab.pdf"}, {"id": "f67a73c9dd1e05bfc51219e70536dbb49158f7bc", "title": "A Gaussian Mixture Model for Classifying the Human Age using DWT and Sammon Map", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f67a/73c9dd1e05bfc51219e70536dbb49158f7bc.pdf"}, {"id": "97f3d35d3567cd3d973c4c435cdd6832461b7c3c", "title": "Unleash the Black Magic in Age: A Multi-Task Deep Neural Network Approach for Cross-Age Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.75"}, {"id": "f214bcc6ecc3309e2efefdc21062441328ff6081", "title": "Speaker verification in score-ageing-quality classification space", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f214/bcc6ecc3309e2efefdc21062441328ff6081.pdf"}, {"id": "8c5cf18c456957c63248245791f44a685e832345", "title": "Implementation of perceptual resemblance of local plastic surgery facial images using Near Sets", "year": 2015, "pdf": null}, {"id": "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "title": "Deep Learning for Biometrics : A Survey KALAIVANI SUNDARARAJAN", "year": "2018", "pdf": null}, {"id": "8355d095d3534ef511a9af68a3b2893339e3f96b", "title": "DEX: Deep EXpectation of Apparent Age from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390"}, {"id": "02567fd428a675ca91a0c6786f47f3e35881bcbd", "title": "Deep Label Distribution Learning With Label Ambiguity", "year": 2017, "pdf": "https://arxiv.org/pdf/1611.01731.pdf"}, {"id": "6e198f6cc4199e1c4173944e3df6f39a302cf787", "title": "MORPH-II: Inconsistencies and Cleaning Whitepaper", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6e19/8f6cc4199e1c4173944e3df6f39a302cf787.pdf"}, {"id": "0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab", "title": "Multi-Directional Multi-Level Dual-Cross Patterns for Robust Face Recognition", "year": 2016, "pdf": "http://arxiv.org/pdf/1401.5311v1.pdf"}, {"id": "0f92e9121e9c0addc35eedbbd25d0a1faf3ab529", "title": "MORPH-II: A Proposed Subsetting Scheme", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0f92/e9121e9c0addc35eedbbd25d0a1faf3ab529.pdf"}, {"id": "f46a526c423dd09a3f14f2c9a3838fb4f56fa730", "title": "Anchored Regression Networks Applied to Age Estimation and Super Resolution", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237444"}, {"id": "76a52ebfc5afd547f8b73430ec81456cf25ddd69", "title": "Gender and age recognition for video analytics solution", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914"}, {"id": "b034cc919af30e96ee7bed769b93ea5828ae361b", "title": "Soft-Margin Mixture of Regressions", "year": "2017", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099915"}, {"id": "0a325d70cc381b136a8f4e471b406cda6d27668c", "title": "A flexible hierarchical approach for facial age estimation based on multiple features", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0a32/5d70cc381b136a8f4e471b406cda6d27668c.pdf"}, {"id": "68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5", "title": "Age Classification Based on Simple Lbp Transitions", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/68a2/ee5c5b76b6feeb3170aaff09b1566ec2cdf5.pdf"}, {"id": "5058a7ec68c32984c33f357ebaee96c59e269425", "title": "A Comparative Evaluation of Regression Learning Algorithms for Facial Age Estimation", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/5058/a7ec68c32984c33f357ebaee96c59e269425.pdf"}, {"id": "3edb0fa2d6b0f1984e8e2c523c558cb026b2a983", "title": "Automatic Age Estimation Based on Facial Aging Patterns", "year": 2007, "pdf": "http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tpami07.pdf"}, {"id": "b1891010a0722117c57e98809e1f2b26cd8e9ee3", "title": "Analyzing the cross-generalization ability of a hybrid genetic & evolutionary application for multibiometric feature weighting and selection", "year": 2012, "pdf": "http://doi.acm.org/10.1145/2330784.2331026"}, {"id": "633c851ebf625ad7abdda2324e9de093cf623141", "title": "Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727"}, {"id": "8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b", "title": "Learning from Longitudinal Face Demonstration - Where Tractable Deep Modeling Meets Inverse Reinforcement Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10520.pdf"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "363e5a0e4cd857e98de72a726ad6f80cea9c50ab", "title": "Fast Landmark Localization With 3D Component Reconstruction and CNN for Cross-Pose Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.09580.pdf"}, {"id": "b3cb91a08be4117d6efe57251061b62417867de9", "title": "Label propagation approach for predicting missing biographic labels in face-based biometric records", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b3cb/91a08be4117d6efe57251061b62417867de9.pdf"}, {"id": "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "title": "Age and gender classification using convolutional neural networks", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf"}, {"id": "e5dfd17dbfc9647ccc7323a5d62f65721b318ba9", "title": "Using Correlated Regression Models to Calculate Cumulative Attributes for Age Estimation", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e5df/d17dbfc9647ccc7323a5d62f65721b318ba9.pdf"}, {"id": "1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12", "title": "D2C: Deep cumulatively and comparatively learning for human age estimation", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.01.007"}, {"id": "29ce6b54a87432dc8371f3761a9568eb3c5593b0", "title": "Age Sensitivity of Face Recognition Algorithms", "year": 2013, "pdf": "https://kar.kent.ac.uk/43222/1/Yatie_EST2013_vfinal.pdf"}, {"id": "49e1aa3ecda55465641b2c2acc6583b32f3f1fc6", "title": "Support Vector Machine for age classification", "year": 2012, "pdf": 
"http://pdfs.semanticscholar.org/49e1/aa3ecda55465641b2c2acc6583b32f3f1fc6.pdf"}, {"id": "ad01c5761c89fdf523565cc0dec77b9a6ec8e694", "title": "Global and Local Consistent Wavelet-domain Age Synthesis", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07764.pdf"}, {"id": "5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65", "title": "An evolving spatio-temporal approach for gender and age group classification with Spiking Neural Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5b9d/9f5a59c48bc8dd409a1bd5abf1d642463d65.pdf"}, {"id": "42ee339802ec9195b2439074e5b7120e74ad79a4", "title": "VRank: Voting system on Ranking model for human age estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7340789"}, {"id": "2bbbbe1873ad2800954058c749a00f30fe61ab17", "title": "Face Verification across Ages Using Self Organizing Map", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2bbb/be1873ad2800954058c749a00f30fe61ab17.pdf"}, {"id": "60ce4a9602c27ad17a1366165033fe5e0cf68078", "title": "Combination of Face Regions in Forensic Scenarios.", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/60ce/4a9602c27ad17a1366165033fe5e0cf68078.pdf"}, {"id": "a01f9461bc8cf8fe40c26d223ab1abea5d8e2812", "title": "Facial Age Estimation Through the Fusion of Texture and Local Appearance Descriptors", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a01f/9461bc8cf8fe40c26d223ab1abea5d8e2812.pdf"}, {"id": "fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6", "title": "Draft: Evaluation Guidelines for Gender Classification and Age Estimation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf"}, {"id": "7c8adb2fa156b119a1f576652c39fb06e4e19675", "title": "Ordinal Regression using Noisy Pairwise Comparisons for Body Mass Index Range Estimation", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.03268.pdf"}, {"id": "0c6a566ebdac4bd14e80cd6bf4631bc7458e1595", "title": "Local descriptors in application to the aging problem in face recognition", "year": "2013", "pdf": "http://doi.org/10.1016/j.patcog.2013.03.010"}, {"id": "15f3d47b48a7bcbe877f596cb2cfa76e798c6452", "title": "Automatic face analysis tools for interactive digital games", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/15f3/d47b48a7bcbe877f596cb2cfa76e798c6452.pdf"}, {"id": "812d3f6975f4cb87e9905ef18696c5c779227634", "title": "A novel comparative deep learning framework for facial age estimation", "year": 2016, "pdf": "https://doi.org/10.1186/s13640-016-0151-4"}, {"id": "35e4b6c20756cd6388a3c0012b58acee14ffa604", "title": "Gender Classification in Large Databases", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf"}, {"id": "6d91da37627c05150cb40cac323ca12a91965759", "title": "Gender Politics in the 2016 U.S. 
Presidential Election: A Computer Vision Approach", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf"}, {"id": "ae88996aad98bfa49a49d653fd9476e5982e982c", "title": "Efficient Group-n Encoding and Decoding for Facial Age Estimation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8141981"}, {"id": "28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08", "title": "Deep Label Distribution Learning for Apparent Age Estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402"}, {"id": "2b632f090c09435d089ff76220fd31fd314838ae", "title": "Early Adaptation of Deep Priors in Age Prediction from Face Images", "year": 2017, "pdf": "http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf"}, {"id": "4f00a48a60cbf750b4ccbd698d5547d83b3eaf3f", "title": "Cubic norm and kernel-based bi-directional PCA: toward age-aware facial kinship verification", "year": 2017, "pdf": null}, {"id": "1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5", "title": "Age estimation via unsupervised neural networks", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163119"}, {"id": "6601a96220005883572fad5aa6b4632e413c8e5e", "title": "Recurrent learning of context for salient region detection", "year": "2018", "pdf": "http://doi.org/10.1007/s00779-018-1171-0"}, {"id": "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "title": "Age and Gender Estimation of Unfiltered Faces", "year": 2014, "pdf": "http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf"}, {"id": "7d20c8afb1815205bd696c0dd2e4dbcc66ab4d31", "title": "Ordinal Deep Feature Learning for Facial Age Estimation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961736"}, {"id": "d3d71a110f26872c69cf25df70043f7615edcf92", "title": "Learning Compact Feature Descriptor and Adaptive Matching Framework for Face Recognition", "year": 2015, "pdf": "https://www.cise.ufl.edu/~dihong/assets/07094272.pdf"}, {"id": "c05a7c72e679745deab9c9d7d481f7b5b9b36bdd", "title": "Naval Postgraduate School Monterey, California Approved for Public Release; Distribution Is Unlimited Biometric Challenges for Future Deployments: a Study of the Impact of Geography, Climate, Culture, and Social Conditions on the Effective Collection of Biometrics", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/c05a/7c72e679745deab9c9d7d481f7b5b9b36bdd.pdf"}, {"id": "694bdadb720d4237b701a5c8c10417843ed89c6f", "title": "Facial age estimation under the terms of local latency using weighted local binary pattern and multi-layer perceptron", "year": 2016, "pdf": null}, {"id": "10195a163ab6348eef37213a46f60a3d87f289c5", "title": "Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks", "year": 2016, "pdf": "https://doi.org/10.1007/s11263-016-0940-3"}, {"id": "c9c2de3628be7e249722b12911bebad84b567ce6", "title": "Age and gender recognition in the wild with deep attention", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.06.028"}, {"id": "f839ae810338e3b12c8e2f8db6ce4d725738d2d9", "title": "Learning CNNs for Face Recognition from Weakly Annotated Images", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.115"}, {"id": "d9810786fccee5f5affaef59bc58d2282718af9b", "title": "Adaptive Frame Selection for Enhanced Face Recognition in Low-Resolution Videos", "year": 2008, "pdf": 
"http://pdfs.semanticscholar.org/d981/0786fccee5f5affaef59bc58d2282718af9b.pdf"}, {"id": "7c11fa4fd91cb57e6e216117febcdd748e595760", "title": "Discriminant Feature Manifold for Facial Aging Estimation", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597453"}, {"id": "3c0bbfe664fb083644301c67c04a7f1331d9515f", "title": "The Role of Color and Contrast in Facial Age Estimation", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3c0b/bfe664fb083644301c67c04a7f1331d9515f.pdf"}, {"id": "fffefc1fb840da63e17428fd5de6e79feb726894", "title": "Fine-Grained Age Estimation in the wild with Attention LSTM Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10445.pdf"}, {"id": "a022eff5470c3446aca683eae9c18319fd2406d5", "title": "Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description s\u00e9mantique des traits visuels humains)", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf"}, {"id": "67d7022462c98e6c5de9f2254b46f0b8d3b92089", "title": "Facial image database mining and classification analysis using different distance metrics", "year": 2017, "pdf": null}, {"id": "0e4baf74dfccef7a99c6954bb0968a2e35315c1f", "title": "Gender identification from face images", "year": 2012, "pdf": "https://doi.org/10.1109/SIU.2012.6204517"}, {"id": "59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1", "title": "International Journal of Computer Application Issue 2, Volume 3 (june 2012) Issn: 2250-1797", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/59cd/afed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1.pdf"}, {"id": "4563b46d42079242f06567b3f2e2f7a80cb3befe", "title": "VADANA: A dense dataset for facial image analysis", "year": 2011, "pdf": "http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf"}, {"id": "a05b1254630257fe27ee195ef05cc50ce6e41f22", "title": "Facial age estimation using hybrid Haar wavelet and color features with Support Vector Regression", "year": 2017, "pdf": null}, {"id": "c398684270543e97e3194674d9cce20acaef3db3", "title": "Comparative Face Soft Biometrics for Human Identification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c398/684270543e97e3194674d9cce20acaef3db3.pdf"}, {"id": "90ae02da16b750a9fd43f8a38440f848309c2fe0", "title": "A review of facial gender recognition", "year": 2015, "pdf": "https://doi.org/10.1007/s10044-015-0499-6"}, {"id": "b2749caec0094e186d3ee850151c899b8508f47a", "title": "AVIUE — Artificial vision to improve the user experience", "year": 2013, "pdf": null}, {"id": "b7894c1f805ffd90ab4ab06002c70de68d6982ab", "title": "A comprehensive age estimation on face images using hybrid filter based feature extraction", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/5e87/06fab62a5716c30a245e5963f51793e1d0ed.pdf"}, {"id": "9939498315777b40bed9150d8940fc1ac340e8ba", "title": "ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583"}, {"id": "70c2c2d2b7e34ff533a8477eff9763be196cd03a", "title": "Selecting discriminative CLBP patterns for age estimation", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICMEW.2015.7169755"}, {"id": "070c8ee3876c06f9a65693e536d61097ace40417", "title": "How Do Facial Expressions Contribute to Age Prediction?", "year": 2013, "pdf": "https://doi.org/10.1109/ACPR.2013.161"}, {"id": "e7b6887cd06d0c1aa4902335f7893d7640aef823", "title": "Modelling of Facial Aging and 
Kinship: A Survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf"}, {"id": "72c0c8deb9ea6f59fde4f5043bff67366b86bd66", "title": "Age progression in Human Faces : A Survey", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/72c0/c8deb9ea6f59fde4f5043bff67366b86bd66.pdf"}, {"id": "37eb666b7eb225ffdafc6f318639bea7f0ba9a24", "title": "Age, Gender and Race Estimation from Unconstrained Face Images", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/37eb/666b7eb225ffdafc6f318639bea7f0ba9a24.pdf"}, {"id": "574751dbb53777101502419127ba8209562c4758", "title": "Gender classification from unaligned facial images using support subspaces", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/5747/51dbb53777101502419127ba8209562c4758.pdf"}, {"id": "cb004e9706f12d1de83b88c209ac948b137caae0", "title": "Face Aging Effect Simulation Using Hidden Factor Analysis Joint Sparse Representation", "year": "2016", "pdf": "https://arxiv.org/pdf/1511.01186.pdf"}, {"id": "e295c1aa47422eb35123053038e62e9aa50a2e3a", "title": "ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "75650bfc20036d99314f7ddae8f2baecde3d57e2", "title": "Concave Losses for Robust Dictionary Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.00659.pdf"}, {"id": "c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8", "title": "Age Estimation Guided Convolutional Neural Network for Age-Invariant Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811"}, {"id": "4aeb87c11fb3a8ad603311c4650040fd3c088832", "title": "Self-paced Mixture of Regressions", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4aeb/87c11fb3a8ad603311c4650040fd3c088832.pdf"}, {"id": "a6e4f924cf9a12625e85c974f0ed136b43c2f3b5", "title": "A new facial age estimation method using centrally overlapped block based local texture features", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4572-6"}, {"id": "d6c8f5674030cf3f5a2f7cc929bad37a422b26a0", "title": "Face Aging Simulation with Deep Convolutional Generative Adversarial Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371"}, {"id": "7c8909da44e89a78fe88e815c83a4ced34f99149", "title": "Multi-classifier Q-stack Aging Model for Adult Face Verification", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.326"}, {"id": "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "title": "Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf"}, {"id": "f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a", "title": "Identification using face regions: application and assessment in forensic scenarios.", "year": "2013", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/24314504"}, {"id": "9755554b13103df634f9b1ef50a147dd02eab02f", "title": "How Transferable Are CNN-Based Features for Age and Gender Classification?", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736925"}, {"id": "362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c", "title": "A partial least squares based ranker for 
fast and accurate age estimation", "year": 2016, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002792.pdf"}, {"id": "1459d4d16088379c3748322ab0835f50300d9a38", "title": "Cross-Domain Visual Matching via Generalized Similarity Measure and Feature Learning", "year": 2017, "pdf": "https://arxiv.org/pdf/1605.04039v1.pdf"}, {"id": "d79530e1745b33f3b771d0b38d090b40afc04191", "title": "A new method to estimate ages of facial image for large database", "year": 2015, "pdf": "https://doi.org/10.1007/s11042-015-2485-9"}, {"id": "414d78e32ac41e6ff8b192bc095fe55f865a02f4", "title": "Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?", "year": "2017", "pdf": null}, {"id": "3d94f81cf4c3a7307e1a976dc6cb7bf38068a381", "title": "Data-Dependent Label Distribution Learning for Age Estimation", "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2655445"}, {"id": "24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852", "title": "Deep Multi-Task Learning for Joint Prediction of Heterogeneous Face Attributes", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.30"}, {"id": "1b27ca161d2e1d4dd7d22b1247acee5c53db5104", "title": "Facial soft biometric features for forensic face recognition.", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1b27/ca161d2e1d4dd7d22b1247acee5c53db5104.pdf"}, {"id": "6c5fdec4dfddd51babf0fbd1275f2f2fa6bbbff0", "title": "Age-Invariant Face Recognition Using Coupled Similarity Reference Coding", "year": "2018", "pdf": "http://doi.org/10.1007/s11063-018-9930-5"}, {"id": "4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d", "title": "A proposed method for the improvement in biometric facial image recognition using document-based classification", "year": "2018", "pdf": "http://doi.org/10.1007/s11227-018-2408-4"}, {"id": "d82b93f848d5442f82154a6011d26df8a9cd00e7", "title": "Neural Network Based Age Classification Using Linear Wavelet Transforms", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/d82b/93f848d5442f82154a6011d26df8a9cd00e7.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "2bf03e8fb775718ac9730524a176ddd189c0e457", "title": "DASM: An open source active shape model for automatic registration of objects", "year": 2013, "pdf": null}, {"id": "1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69", "title": "Self-enhancement Influences Medial Frontal Cortex Alpha Power to Social Rejection Feedback", "year": "2014", "pdf": "http://doi.org/10.1162/jocn_a_00645"}, {"id": "0fc5c6f06e40014a56f492172f44c073d269e95c", "title": "Genetic and evolutionary biometrics: Exploring value preference space for hybrid feature weighting and selection", "year": 2013, "pdf": "https://doi.org/10.1108/17563781311301490"}, {"id": "23675cb2180aac466944df0edda4677a77c455cd", "title": "Age Estimation Using AAM and Local Facial Features", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.142"}, {"id": "50ff21e595e0ebe51ae808a2da3b7940549f4035", "title": "Age Group and Gender Estimation in the Wild With Deep RoR Architecture", "year": 2017, "pdf": "http://export.arxiv.org/pdf/1710.02985"}, {"id": "dfabe7ef245ca68185f4fcc96a08602ee1afb3f7", "title": "Group-aware deep feature learning for facial age estimation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/dfab/e7ef245ca68185f4fcc96a08602ee1afb3f7.pdf"}, {"id": 
"64ca0dbe60bf8f8243fad73a2494c3fa7a2770e2", "title": "Classification of human age based on Neural Network using FG-NET Aging database and Wavelets", "year": 2012, "pdf": null}, {"id": "bc6de183cd8b2baeebafeefcf40be88468b04b74", "title": "Age Group Recognition using Human Facial Images", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e057/e713301e089887295543226b79b534fdd145.pdf"}, {"id": "d4288daef6519f6852f59ac6b85e21b8910f2207", "title": "Recurrent Face Aging with Hierarchical AutoRegressive Memory.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994505"}, {"id": "6e12ba518816cbc2d987200c461dc907fd19f533", "title": "A computational approach to body mass index prediction from face images", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6e12/ba518816cbc2d987200c461dc907fd19f533.pdf"}, {"id": "744d23991a2c48d146781405e299e9b3cc14b731", "title": "Aging Face Recognition: A Hierarchical Learning Model Based on Local Patterns Selection", "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2535284"}, {"id": "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "title": "Do They All Look the Same? Deciphering Chinese, Japanese and Koreans by Fine-Grained Deep Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf"}, {"id": "e0423788eb91772de9d708a17799179cf3230d63", "title": "Age Classification Using an Optimized CNN Architecture", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3093241.3093277"}, {"id": "ff946df1cea6c107b2c336419c34ea69cc3ddbc4", "title": "EGA — Ethnicity, gender and age, a pre-annotated face database", "year": 2012, "pdf": null}, {"id": "dcb44fc19c1949b1eda9abe998935d567498467d", "title": "Ordinal Zero-Shot Learning", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/dcb4/4fc19c1949b1eda9abe998935d567498467d.pdf"}, {"id": "469ee1b00f7bbfe17c698ccded6f48be398f2a44", "title": "SURVEy: Techniques for Aging Problems in Face Recognition", "year": "", "pdf": "http://pdfs.semanticscholar.org/469e/e1b00f7bbfe17c698ccded6f48be398f2a44.pdf"}, {"id": "bcefb15246b1c9cea74a49a4ba1c990b6b97a19c", "title": "Review on the effects of age, gender, and race demographics on automatic face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s00371-017-1428-z"}, {"id": "c29fe5ed41d2240352fcb8d8196eb2f31d009522", "title": "Age estimation with dynamic age range", "year": "2015", "pdf": "http://doi.org/10.1007/s11042-015-3230-0"}, {"id": "205f3d654b7d28d00d15b034a8c5b2a8740bd8b6", "title": "Discriminant Learning Through Multiple Principal Angles for Visual Recognition", "year": 2012, "pdf": "https://www.researchgate.net/profile/Ya_Su4/publication/51686551_Discriminant_learning_through_multiple_principal_angles_for_visual_recognition/links/00b495253b0057832b000000.pdf"}, {"id": "4e626b2502ee042cf4d7425a8e7a228789b23856", "title": "Aspects of Age Variation in Facial Morphology Affecting Biometrics", "year": 2007, "pdf": null}, {"id": "fc04a50379e08ddde501816eb1f9560c36d01a39", "title": "Image Pre-processing Using OpenCV Library on MORPH-II Face Database", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.06934.pdf"}, {"id": "ef36ca8abf0a23e661f3b1603057963a70e16704", "title": "An effective approach of facial age estimation with extreme learning machine", "year": 2017, "pdf": null}, {"id": "df674dc0fc813c2a6d539e892bfc74f9a761fbc8", "title": "An Image Mining System for Gender Classification & Age Prediction Based on Facial Features", "year": 2013, "pdf": 
"http://pdfs.semanticscholar.org/df67/4dc0fc813c2a6d539e892bfc74f9a761fbc8.pdf"}, {"id": "b22f5f0929704752a16d0f65f00a5161a059d8e3", "title": "On soft biometrics", "year": "2015", "pdf": "http://doi.org/10.1016/j.patrec.2015.08.006"}, {"id": "7195cb08ba2248f3214f5dc5d7881533dd1f46d9", "title": "Age Regression Based on Local Image Features", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5673820"}, {"id": "0b4c4ea4a133b9eab46b217e22bda4d9d13559e6", "title": "MORF: Multi-Objective Random Forests for face characteristic estimation", "year": 2015, "pdf": "http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_morph_random_forests.pdf"}, {"id": "0f112e49240f67a2bd5aaf46f74a924129f03912", "title": "Age-Invariant Face Recognition", "year": 2010, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/ParkTongJain_AgeInvariantFaceRecognition_PAMI10.pdf"}, {"id": "ac26166857e55fd5c64ae7194a169ff4e473eb8b", "title": "Personalized Age Progression with Bi-Level Aging Dictionary Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.01039.pdf"}, {"id": "daa120032d8f141bc6aae20e23b1b754a0dd7d5f", "title": "Kernel ELM and CNN Based Facial Age Estimation", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593"}, {"id": "fe866887d3c26ee72590c440ed86ffc80e980293", "title": "Understanding Human Aging Patterns from a Machine Perspective", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397011"}, {"id": "1c93b48abdd3ef1021599095a1a5ab5e0e020dd5", "title": "A Compositional and Dynamic Model for Face Aging", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2009.39"}, {"id": "166186e551b75c9b5adcc9218f0727b73f5de899", "title": "Automatic Age and Gender Recognition in Human Face Image Dataset using Convolutional Neural Network System", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf"}, {"id": "cce332405ce9cd9dccc45efac26d1d614eaa982d", "title": "A Ranking Approach for Human Ages Estimation Based on Face Images", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597533"}, {"id": "25bcd5aa3bbe56c992547fba683418655b46fc4a", "title": "Pyramid multi-level features for facial demographic estimation", "year": 2017, "pdf": "https://doi.org/10.1016/j.eswa.2017.03.030"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "aca273a9350b10b6e2ef84f0e3a327255207d0f5", "title": "On soft biometrics", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/efb2/4d35d8f6a46e1ff3800a2481bc7e681e255e.pdf"}, {"id": "e16efd2ae73a325b7571a456618bfa682b51aef8", "title": "Semi-Supervised Adaptive Label Distribution Learning for Facial Age Estimation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e16e/fd2ae73a325b7571a456618bfa682b51aef8.pdf"}, {"id": "ec0104286c96707f57df26b4f0a4f49b774c486b", "title": "An Ensemble CNN2ELM for Age Estimation", "year": 2018, "pdf": "http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf"}, {"id": "3c6542295cf7fe362d7d629ac10670bf30cdabce", "title": "Hierarchical Aggregation Based Deep Aging Feature for Age 
Prediction", "year": 2015, "pdf": "https://doi.org/10.1109/DICTA.2015.7371264"}, {"id": "d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9", "title": "Gender estimation based on supervised HOG, Action Units and unsupervised CNN feature extraction", "year": 2017, "pdf": null}, {"id": "4f37f71517420c93c6841beb33ca0926354fa11d", "title": "A hybrid deep learning CNN-ELM for age and gender classification", "year": "2018", "pdf": "http://doi.org/10.1016/j.neucom.2017.08.062"}, {"id": "7ad7897740e701eae455457ea74ac10f8b307bed", "title": "Random Subspace Two-dimensional LDA for Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7ad7/897740e701eae455457ea74ac10f8b307bed.pdf"}, {"id": "69ff40fd5ce7c3e6db95a2b63d763edd8db3a102", "title": "Human Age Estimation via Geometric and Textural Features", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/69ff/40fd5ce7c3e6db95a2b63d763edd8db3a102.pdf"}, {"id": "b1301c722886b6028d11e4c2084ee96466218be4", "title": "Facial Aging and Rejuvenation by Conditional Multi-Adversarial Autoencoder with Ordinal Regression", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/b130/1c722886b6028d11e4c2084ee96466218be4.pdf"}, {"id": "b5f9306c3207ac12ac761e7d028c78b3009a219c", "title": "Age estimation based on extended non-negative matrix factorization", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6093779"}, {"id": "22532c6e38ded690dc1420f05c18e23f6f24804d", "title": "Chapter 5 Genetic & Evolutionary Biometrics", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2253/2c6e38ded690dc1420f05c18e23f6f24804d.pdf"}, {"id": "a591639bfcabc4091ff556364074c58521159ff9", "title": "General structured sparse learning for human facial age estimation", "year": 2017, "pdf": null}, {"id": "84fd7c00243dc4f0df8ab1a8c497313ca4f8bd7b", "title": "Perceived Age Estimation from Face Images", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/84fd/7c00243dc4f0df8ab1a8c497313ca4f8bd7b.pdf"}, {"id": "68c17aa1ecbff0787709be74d1d98d9efd78f410", "title": "Gender Classification from Face Images Using Mutual Information and Feature Fusion", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/68c1/7aa1ecbff0787709be74d1d98d9efd78f410.pdf"}, {"id": "997c7ebf467c579b55859315c5a7f15c1df43432", "title": "A Study of Convolutional Sparse Feature Learning for Human Age Estimate", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.141"}, {"id": "ed32df6b122b15a52238777c9993ed31107b4bed", "title": "Age face simulation using aging functions on global and local features with residual images", "year": "2017", "pdf": "http://doi.org/10.1016/j.eswa.2017.03.008"}, {"id": "8411fe1142935a86b819f065cd1f879f16e77401", "title": "Facial Recognition using Modified Local Binary Pattern and Random Forest", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/8411/fe1142935a86b819f065cd1f879f16e77401.pdf"}, {"id": "48cfc5789c246c6ad88ff841701204fc9d6577ed", "title": "Age Invariant Face Recognition Based on DCT Feature Extraction and Kernel Fisher Analysis", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/48cf/c5789c246c6ad88ff841701204fc9d6577ed.pdf"}, {"id": "fa23122db319440fb5a7253e19709f992b4571b9", "title": "Human Age Estimation via Geometric and Textural Features", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/fa23/122db319440fb5a7253e19709f992b4571b9.pdf"}, {"id": "12e4545d07e1793df87520f384b37a015815d2f7", "title": "Age invariant face recognition: a survey on facial aging databases, techniques and effect of aging", 
"year": "2018", "pdf": "http://doi.org/10.1007/s10462-018-9661-z"}, {"id": "4919663c62174a9bc0cc7f60da8f96974b397ad2", "title": "Human age estimation using enhanced bio-inspired features (EBIF)", "year": 2010, "pdf": "https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/EBIF_5-2-2010_v_5.pdf"}, {"id": "e9fcd15bcb0f65565138dda292e0c71ef25ea8bb", "title": "Analysing Facial Regions for Face Recognition Using Forensic Protocols", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e9fc/d15bcb0f65565138dda292e0c71ef25ea8bb.pdf"}, {"id": "47a003e6bbfc5bf04a099ca53c67ddfdbea71315", "title": "Q-stack aging model for face verification", "year": 2009, "pdf": "http://www.researchgate.net/profile/Andrzej_Drygajlo/publication/228669241_Q-stack_aging_model_for_face_verification/links/09e4150f7ffb6d3946000000.pdf"}, {"id": "7553fba5c7f73098524fbb58ca534a65f08e91e7", "title": "A Practical Approach for Determination of Human Gender & Age", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7553/fba5c7f73098524fbb58ca534a65f08e91e7.pdf"}, {"id": "44fb4dcf88eb482e2ab79fd4540caf941613b970", "title": "Perceived Age Estimation under Lighting Condition Change by Covariate Shift Adaptation", "year": 2010, "pdf": "http://www.researchgate.net/profile/Masashi_Sugiyama/publication/220930547_Perceived_Age_Estimation_under_Lighting_Condition_Change_by_Covariate_Shift_Adaptation/links/0fcfd5122b4d406edd000000.pdf"}, {"id": "f47404424270f6a20ba1ba8c2211adfba032f405", "title": "Identification of Face Age range Group using Neural Network", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f474/04424270f6a20ba1ba8c2211adfba032f405.pdf"}, {"id": "a608c5f8fd42af6e9bd332ab516c8c2af7063c61", "title": "Age Estimation via Grouping and Decision Fusion", "year": 2015, "pdf": "http://mcl.usc.edu/wp-content/uploads/2016/01/Liu-TIFS-2015-10.pdf"}, {"id": "0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e", "title": "Large Age-Gap face verification by feature injection in deep networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/morph_nc.json b/site/datasets/unknown/morph_nc.json new file mode 100644 index 00000000..98ba886e --- /dev/null +++ b/site/datasets/unknown/morph_nc.json @@ -0,0 +1 @@ +{"id": "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "paper": {"paper_id": "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "key": "morph_nc", "title": "MORPH: a longitudinal image database of normal adult age-progression", "year": 2006, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78", "address": "", "name": "MORPH Non-Commercial"}, "citations": [{"id": "8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125", "title": "Detecting and classifying scars, marks, and tattoos found in the wild", "year": 2012, "pdf": "https://www.wjscheirer.com/papers/wjs_btas2012_smt.pdf"}, {"id": "f1748303cc02424704b3a35595610890229567f9", "title": "Learning-based encoding with soft assignment for age estimation under unconstrained imaging conditions", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f174/8303cc02424704b3a35595610890229567f9.pdf"}, {"id": "7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35", "title": "Comparison of Three Different CNN Architectures for Age Classification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICSC.2017.61"}, {"id": "dca2bb023b076de1ccd0c6b8d71faeb3fccb3978", "title": "Joint Estimation of Age and Expression by Combining Scattering and Convolutional Networks", "year": 2018, "pdf": "http://doi.acm.org/10.1145/3152118"}, {"id": "8d8461ed57b81e05cc46be8e83260cd68a2ebb4d", "title": "Age identification of Facial Images using Neural Network", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/8d84/61ed57b81e05cc46be8e83260cd68a2ebb4d.pdf"}, {"id": "e198a7b9e61dd19c620e454aaa81ae8f7377ade0", "title": "A hierarchical approach to facial aging", "year": 2010, "pdf": "https://doi.org/10.1109/CVPRW.2010.5543611"}, {"id": "42a5dc91852c8c14ed5f4c3b451c9dc98348bc02", "title": "A Data Augmentation Methodology to Improve Age Estimation Using Convolutional Neural Networks", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021"}, {"id": "1f3ae376b22136a2fe2e96632d4383653a42e4d4", "title": "A technique for face verification across age progression with large age gap", "year": 2017, "pdf": null}, {"id": "15e0b9ba3389a7394c6a1d267b6e06f8758ab82b", "title": "The OU-ISIR Gait Database comprising the Large Population Dataset with Age and performance evaluation of age estimation", "year": 2017, "pdf": "https://doi.org/10.1186/s41074-017-0035-2"}, {"id": "c1482491f553726a8349337351692627a04d5dbe", "title": "When Follow is Just One Click Away: Understanding Twitter Follow Behavior in the 2016 U.S. 
Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf"}, {"id": "37b6d6577541ed991435eaf899a2f82fdd72c790", "title": "Vision-based Human Gender Recognition: A Survey", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/37b6/d6577541ed991435eaf899a2f82fdd72c790.pdf"}, {"id": "7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e", "title": "Face Sketch Synthesis via Sparse Representation-Based Greedy Search", "year": 2015, "pdf": "http://jpinfotech.org/wp-content/plugins/infotech/file/upload/pdf/8962Face-Sketch-Synthesis-via-Sparse-Representation-Based-Greedy-Search-pdf.pdf"}, {"id": "1890470d07a090e7b762091c7b9670b5c2e1c348", "title": "Improving Random Forests by Correlation-Enhancing Projections and Sample-Based Sparse Discriminant Selection", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CRV.2016.20"}, {"id": "2c8743089d9c7df04883405a31b5fbe494f175b4", "title": "Real-time full-body human gender recognition in (RGB)-D data", "year": 2015, "pdf": "http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf"}, {"id": "b755505bdd5af078e06427d34b6ac2530ba69b12", "title": "NFRAD: Near-Infrared Face Recognition at a Distance", "year": 2011, "pdf": "http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf"}, {"id": "97d1d561362a8b6beb0fdbee28f3862fb48f1380", "title": "Age Synthesis and Estimation via Faces: A Survey", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.36"}, {"id": "88691c3b74753a8bd67459896c8660deece9a2b0", "title": "Age-based human face image retrieval using zernike moments", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8515123"}, {"id": "5c435c4bc9c9667f968f891e207d241c3e45757a", "title": "\"How old are you?\" : Age Estimation with Tensors of Binary Gaussian Receptive Maps", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/eb6a/13c8a607dfc535e5f31b7c8843335674644c.pdf"}, {"id": "26c7eda262dfda1c3a3597a3bf1f2f1cc4013425", "title": "Some Like It Hot — Visual Guidance for Preference Prediction", "year": 2016, "pdf": null}, {"id": "289cfcd081c4393c7d6f63510747b5372202f855", "title": "Detecting Decision Ambiguity from Facial Images", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373873"}, {"id": "fcd3d69b418d56ae6800a421c8b89ef363418665", "title": "Effects of Aging over Facial Feature Analysis and Face Recognition", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/fcd3/d69b418d56ae6800a421c8b89ef363418665.pdf"}, {"id": "85387549277d6131dc8596ffacc7a21aeee0c6d1", "title": "Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.06647.pdf"}, {"id": "c62c07de196e95eaaf614fb150a4fa4ce49588b4", "title": "SSR-Net: A Compact Soft Stagewise Regression Network for Age Estimation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/c62c/07de196e95eaaf614fb150a4fa4ce49588b4.pdf"}, {"id": "fa24bf887d3b3f6f58f8305dcd076f0ccc30272a", "title": "Interval Insensitive Loss for Ordinal Classification", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/fa24/bf887d3b3f6f58f8305dcd076f0ccc30272a.pdf"}, {"id": "4562ea84ebfc8d9864e943ed9e44d35997bbdf43", "title": "Small Sample Deep Learning for Newborn Gestational Age Estimation", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.19"}, {"id": "3dda181be266950ba1280b61eb63ac11777029f9", "title": "When 
Celebrities Endorse Politicians: Analyzing the Behavior of Celebrity Followers in the 2016 U.S. Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf"}, {"id": "7bc1e7d000ab517161a83b1fedf353e619516ddf", "title": "Age group classification via structured fusion of uncertainty-driven shape features and selected surface features", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836068"}, {"id": "13f03aab62fc29748114a0219426613cf3ba76ae", "title": "MORPH-II: Feature Vector Documentation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/13f0/3aab62fc29748114a0219426613cf3ba76ae.pdf"}, {"id": "437642cfc8c34e445ea653929e2d183aaaeeb704", "title": "Component Biologically Inspired Features with Moving Segmentation for Age Estimation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014815"}, {"id": "2042f1cacea262ec924f74994e49d5e87d9d0445", "title": "A survey of homeland security biometrics and forensics research", "year": 2016, "pdf": null}, {"id": "22bebedc1a5f3556cb4f577bdbe032299a2865e8", "title": "Effective training of convolutional neural networks for face-based gender and age prediction", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf"}, {"id": "29db16efc3b378c50511f743e5197a4c0b9e902f", "title": "Deeply Learned Rich Coding for Cross-Dataset Facial Age Estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401"}, {"id": "083a2bc86e0984968b06593ba06654277b252f00", "title": "Neural evidence for the contribution of holistic processing but not attention allocation to the other-race effect on face memory.", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/083a/2bc86e0984968b06593ba06654277b252f00.pdf"}, {"id": "3af28e9e9e883c235b6418a68bda519b08f9ae26", "title": "Implications of Adult Facial Aging on Biometrics", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/3af2/8e9e9e883c235b6418a68bda519b08f9ae26.pdf"}, {"id": "3b1260d78885e872cf2223f2c6f3d6f6ea254204", "title": "Face Tracking and Recognition at a Distance: A Coaxial & Concentric PTZ Camera System", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/3b12/60d78885e872cf2223f2c6f3d6f6ea254204.pdf"}, {"id": "25337690fed69033ef1ce6944e5b78c4f06ffb81", "title": "Strategic Engagement Regulation: an Integration of Self-enhancement and Engagement", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2533/7690fed69033ef1ce6944e5b78c4f06ffb81.pdf"}, {"id": "09111da0aedb231c8484601444296c50ca0b5388", "title": "Joint estimation of age, gender and ethnicity: CCA vs. 
PLS", "year": 2013, "pdf": "https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553737.pdf"}, {"id": "893239f17dc2d17183410d8a98b0440d98fa2679", "title": "UvA-DARE ( Digital Academic Repository ) Expression-Invariant Age Estimation", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/d5b1/6481d34838cc92593f5f311badbf7f18ed5a.pdf"}, {"id": "9f49013657cbce384df9b16a2a17293bc4c9d967", "title": "PTZ camera assisted face acquisition, tracking & recognition", "year": 2010, "pdf": null}, {"id": "23d55061f7baf2ffa1c847d356d8f76d78ebc8c1", "title": "Generic and attribute-specific deep representations for maritime vessels", "year": 2017, "pdf": "https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0033-4?site=ipsjcva.springeropen.com"}, {"id": "11b904c9180686574e6047bbd9868c354ca46cb4", "title": "Mapping Dynamic Bayesian Networks to <formula formulatype=\"inline\"> <tex Notation=\"TeX\">$\\alpha$</tex></formula>-Shapes: Application to Human Faces Identification Across Ages", "year": 2012, "pdf": null}, {"id": "87b607b8d4858a16731144d17f457a54e488f15d", "title": "Cross-Age Face Recognition on a Very Large Database: The Performance versus Age Intervals and Improvement Using Soft Biometric Traits", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597532"}, {"id": "be7444c891caf295d162233bdae0e1c79791d566", "title": "Face Recognition Performance under Aging", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014816"}, {"id": "9e28243f047cc9f62a946bf87abedb65b0da0f0a", "title": "Can We Minimize the Influence Due to Gender and Race in Age Estimation?", "year": 2013, "pdf": "https://doi.org/10.1109/ICMLA.2013.141"}, {"id": "931f99bc6865d3d0c80c15d5b1c05338dfe98982", "title": "Identification of Aging Faces Using A-Stack Classification Model", "year": 2009, "pdf": null}, {"id": "93e962f8886eae13b02ad2aa98bdedfbd7e68709", "title": "Dual Conditional GANs for Face Aging and Rejuvenation", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/93e9/62f8886eae13b02ad2aa98bdedfbd7e68709.pdf"}, {"id": "1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6", "title": "Demographic Estimation from Face Images: Human vs. 
Machine Performance", "year": 2015, "pdf": "http://www.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf"}, {"id": "6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c", "title": "Ordinal Regression with Multiple Output CNN for Age Estimation", "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532"}, {"id": "197eaa59a003a4c7cc77c1abe0f99d942f716942", "title": "Web image mining towards universal age estimator", "year": 2009, "pdf": "http://www.lv-nus.org/papers%5C2009%5C2009_mm_age.pdf"}, {"id": "892400017e5c93611dc8361e7749135520d66f25", "title": "A comparative study of age-invariant face recognition with different feature representations", "year": 2010, "pdf": "https://doi.org/10.1109/ICARCV.2010.5707394"}, {"id": "28aa89b2c827e5dd65969a5930a0520fdd4a3dc7", "title": "Characterization and Classification of Faces across Age Progression", "year": 2009, "pdf": "http://pdfs.semanticscholar.org/28aa/89b2c827e5dd65969a5930a0520fdd4a3dc7.pdf"}, {"id": "80688e72b00013eabe57ce88be0c204d0b5aea2c", "title": "Semantic Face Signatures: Recognizing and Retrieving Faces by Verbal Descriptions", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078274"}, {"id": "b47a3c909ee9b099854619054fd00e200b944aa9", "title": "Deeply-Learned Feature for Age Estimation", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2015.77"}, {"id": "1e1e66783f51a206509b0a427e68b3f6e40a27c8", "title": "Semi-supervised Estimation of Perceived Age from Face Images", "year": 2010, "pdf": "http://pdfs.semanticscholar.org/1e1e/66783f51a206509b0a427e68b3f6e40a27c8.pdf"}, {"id": "362ba8317aba71c78dafca023be60fb71320381d", "title": "Nighttime face recognition at large standoff: Cross-distance and cross-spectral matching", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/362b/a8317aba71c78dafca023be60fb71320381d.pdf"}, {"id": "4e8f301dbedc9063831da1306b294f2bd5b10477", "title": "Discriminating Power of FISWG Characteristic Descriptors Under Different Forensic Use Cases", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736919"}, {"id": "1135a818b756b057104e45d976546970ba84e612", "title": "Age, Gender, and Fine-Grained Ethnicity Prediction Using Convolutional Neural Networks for the East Asian Face Dataset", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.118"}, {"id": "3dce635ce4b55fb63fc6d41b38640403b152a048", "title": "The Impact of Age and Threshold Variation on Facial Recognition Algorithm Performance Using Images of Children", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411225"}, {"id": "3a4f522fa9d2c37aeaed232b39fcbe1b64495134", "title": "Face Recognition and Retrieval Using Cross-Age Reference Coding With Cross-Age Celebrity Dataset", "year": 2015, "pdf": "http://ijireeice.com/upload/2016/may-16/IJIREEICE%20101.pdf"}, {"id": "33554ff9d1d3b32f67020598320d3d761d7ec81f", "title": "Label Distribution Learning Forests", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3355/4ff9d1d3b32f67020598320d3d761d7ec81f.pdf"}, {"id": "621741b87258c745f8905d15ba81aaf2a8be60d2", "title": "A Study of Geometric Features for Aging Face Recognition", "year": 2010, "pdf": null}, {"id": "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "title": "Cross-Database Evaluation of Normalized Raw Pixels for Gender Recognition under Unconstrained Settings", "year": 2014, "pdf": "https://doi.org/10.1109/ICPR.2014.542"}, {"id": "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "title": "Age group classification in the wild 
with deep RoR architecture", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296549"}, {"id": "1ae3a26a985fe525b23f080a9e1041ecff0509ad", "title": "A Comparative Study of Statistical Conversion of Face to Voice Based on Their Subjective Impressions", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1ae3/a26a985fe525b23f080a9e1041ecff0509ad.pdf"}, {"id": "0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306", "title": "Review of Perceptual Resemblance of Local Plastic Surgery Facial Images using Near Sets", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0d6b/28691e1aa2a17ffaa98b9b38ac3140fb3306.pdf"}, {"id": "30b15cdb72760f20f80e04157b57be9029d8a1ab", "title": "Face Aging with Identity-Preserved Conditional Generative Adversarial Networks", "year": "", "pdf": "https://pdfs.semanticscholar.org/30b1/5cdb72760f20f80e04157b57be9029d8a1ab.pdf"}, {"id": "f67a73c9dd1e05bfc51219e70536dbb49158f7bc", "title": "A Gaussian Mixture Model for Classifying the Human Age using DWT and Sammon Map", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f67a/73c9dd1e05bfc51219e70536dbb49158f7bc.pdf"}, {"id": "97f3d35d3567cd3d973c4c435cdd6832461b7c3c", "title": "Unleash the Black Magic in Age: A Multi-Task Deep Neural Network Approach for Cross-Age Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.75"}, {"id": "f214bcc6ecc3309e2efefdc21062441328ff6081", "title": "Speaker verification in score-ageing-quality classification space", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/f214/bcc6ecc3309e2efefdc21062441328ff6081.pdf"}, {"id": "8c5cf18c456957c63248245791f44a685e832345", "title": "Implementation of perceptual resemblance of local plastic surgery facial images using Near Sets", "year": 2015, "pdf": null}, {"id": "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "title": "Deep Learning for Biometrics : A Survey KALAIVANI SUNDARARAJAN", "year": "2018", "pdf": null}, {"id": "8355d095d3534ef511a9af68a3b2893339e3f96b", "title": "DEX: Deep EXpectation of Apparent Age from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390"}, {"id": "02567fd428a675ca91a0c6786f47f3e35881bcbd", "title": "Deep Label Distribution Learning With Label Ambiguity", "year": 2017, "pdf": "https://arxiv.org/pdf/1611.01731.pdf"}, {"id": "6e198f6cc4199e1c4173944e3df6f39a302cf787", "title": "MORPH-II: Inconsistencies and Cleaning Whitepaper", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6e19/8f6cc4199e1c4173944e3df6f39a302cf787.pdf"}, {"id": "0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab", "title": "Multi-Directional Multi-Level Dual-Cross Patterns for Robust Face Recognition", "year": 2016, "pdf": "http://arxiv.org/pdf/1401.5311v1.pdf"}, {"id": "0f92e9121e9c0addc35eedbbd25d0a1faf3ab529", "title": "MORPH-II: A Proposed Subsetting Scheme", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0f92/e9121e9c0addc35eedbbd25d0a1faf3ab529.pdf"}, {"id": "f46a526c423dd09a3f14f2c9a3838fb4f56fa730", "title": "Anchored Regression Networks Applied to Age Estimation and Super Resolution", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237444"}, {"id": "76a52ebfc5afd547f8b73430ec81456cf25ddd69", "title": "Gender and age recognition for video analytics solution", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914"}, {"id": "b034cc919af30e96ee7bed769b93ea5828ae361b", "title": "Soft-Margin Mixture of Regressions", "year": "2017", "pdf": 
"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099915"}, {"id": "0a325d70cc381b136a8f4e471b406cda6d27668c", "title": "A flexible hierarchical approach for facial age estimation based on multiple features", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0a32/5d70cc381b136a8f4e471b406cda6d27668c.pdf"}, {"id": "68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5", "title": "Age Classification Based on Simple Lbp Transitions", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/68a2/ee5c5b76b6feeb3170aaff09b1566ec2cdf5.pdf"}, {"id": "5058a7ec68c32984c33f357ebaee96c59e269425", "title": "A Comparative Evaluation of Regression Learning Algorithms for Facial Age Estimation", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/5058/a7ec68c32984c33f357ebaee96c59e269425.pdf"}, {"id": "3edb0fa2d6b0f1984e8e2c523c558cb026b2a983", "title": "Automatic Age Estimation Based on Facial Aging Patterns", "year": 2007, "pdf": "http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tpami07.pdf"}, {"id": "b1891010a0722117c57e98809e1f2b26cd8e9ee3", "title": "Analyzing the cross-generalization ability of a hybrid genetic & evolutionary application for multibiometric feature weighting and selection", "year": 2012, "pdf": "http://doi.acm.org/10.1145/2330784.2331026"}, {"id": "633c851ebf625ad7abdda2324e9de093cf623141", "title": "Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727"}, {"id": "8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b", "title": "Learning from Longitudinal Face Demonstration - Where Tractable Deep Modeling Meets Inverse Reinforcement Learning", "year": "2017", "pdf": "https://arxiv.org/pdf/1711.10520.pdf"}, {"id": "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "title": "Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.00906.pdf"}, {"id": "363e5a0e4cd857e98de72a726ad6f80cea9c50ab", "title": "Fast Landmark Localization With 3D Component Reconstruction and CNN for Cross-Pose Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.09580.pdf"}, {"id": "b3cb91a08be4117d6efe57251061b62417867de9", "title": "Label propagation approach for predicting missing biographic labels in face-based biometric records", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b3cb/91a08be4117d6efe57251061b62417867de9.pdf"}, {"id": "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "title": "Age and gender classification using convolutional neural networks", "year": 2015, "pdf": "http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf"}, {"id": "e5dfd17dbfc9647ccc7323a5d62f65721b318ba9", "title": "Using Correlated Regression Models to Calculate Cumulative Attributes for Age Estimation", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e5df/d17dbfc9647ccc7323a5d62f65721b318ba9.pdf"}, {"id": "1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12", "title": "D2C: Deep cumulatively and comparatively learning for human age estimation", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.01.007"}, {"id": "29ce6b54a87432dc8371f3761a9568eb3c5593b0", "title": "Age Sensitivity of Face Recognition Algorithms", "year": 2013, "pdf": "https://kar.kent.ac.uk/43222/1/Yatie_EST2013_vfinal.pdf"}, {"id": "49e1aa3ecda55465641b2c2acc6583b32f3f1fc6", "title": "Support Vector Machine for age classification", "year": 2012, "pdf": 
"http://pdfs.semanticscholar.org/49e1/aa3ecda55465641b2c2acc6583b32f3f1fc6.pdf"}, {"id": "ad01c5761c89fdf523565cc0dec77b9a6ec8e694", "title": "Global and Local Consistent Wavelet-domain Age Synthesis", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07764.pdf"}, {"id": "5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65", "title": "An evolving spatio-temporal approach for gender and age group classification with Spiking Neural Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/5b9d/9f5a59c48bc8dd409a1bd5abf1d642463d65.pdf"}, {"id": "42ee339802ec9195b2439074e5b7120e74ad79a4", "title": "VRank: Voting system on Ranking model for human age estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7340789"}, {"id": "2bbbbe1873ad2800954058c749a00f30fe61ab17", "title": "Face Verification across Ages Using Self Organizing Map", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2bbb/be1873ad2800954058c749a00f30fe61ab17.pdf"}, {"id": "60ce4a9602c27ad17a1366165033fe5e0cf68078", "title": "Combination of Face Regions in Forensic Scenarios.", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/60ce/4a9602c27ad17a1366165033fe5e0cf68078.pdf"}, {"id": "a01f9461bc8cf8fe40c26d223ab1abea5d8e2812", "title": "Facial Age Estimation Through the Fusion of Texture and Local Appearance Descriptors", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/a01f/9461bc8cf8fe40c26d223ab1abea5d8e2812.pdf"}, {"id": "fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6", "title": "Draft: Evaluation Guidelines for Gender Classification and Age Estimation", "year": 2011, "pdf": "http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf"}, {"id": "7c8adb2fa156b119a1f576652c39fb06e4e19675", "title": "Ordinal Regression using Noisy Pairwise Comparisons for Body Mass Index Range Estimation", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.03268.pdf"}, {"id": "0c6a566ebdac4bd14e80cd6bf4631bc7458e1595", "title": "Local descriptors in application to the aging problem in face recognition", "year": "2013", "pdf": "http://doi.org/10.1016/j.patcog.2013.03.010"}, {"id": "15f3d47b48a7bcbe877f596cb2cfa76e798c6452", "title": "Automatic face analysis tools for interactive digital games", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/15f3/d47b48a7bcbe877f596cb2cfa76e798c6452.pdf"}, {"id": "812d3f6975f4cb87e9905ef18696c5c779227634", "title": "A novel comparative deep learning framework for facial age estimation", "year": 2016, "pdf": "https://doi.org/10.1186/s13640-016-0151-4"}, {"id": "35e4b6c20756cd6388a3c0012b58acee14ffa604", "title": "Gender Classification in Large Databases", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf"}, {"id": "6d91da37627c05150cb40cac323ca12a91965759", "title": "Gender Politics in the 2016 U.S. 
Presidential Election: A Computer Vision Approach", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf"}, {"id": "ae88996aad98bfa49a49d653fd9476e5982e982c", "title": "Efficient Group-n Encoding and Decoding for Facial Age Estimation", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8141981"}, {"id": "28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08", "title": "Deep Label Distribution Learning for Apparent Age Estimation", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402"}, {"id": "2b632f090c09435d089ff76220fd31fd314838ae", "title": "Early Adaptation of Deep Priors in Age Prediction from Face Images", "year": 2017, "pdf": "http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf"}, {"id": "4f00a48a60cbf750b4ccbd698d5547d83b3eaf3f", "title": "Cubic norm and kernel-based bi-directional PCA: toward age-aware facial kinship verification", "year": 2017, "pdf": null}, {"id": "1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5", "title": "Age estimation via unsupervised neural networks", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163119"}, {"id": "6601a96220005883572fad5aa6b4632e413c8e5e", "title": "Recurrent learning of context for salient region detection", "year": "2018", "pdf": "http://doi.org/10.1007/s00779-018-1171-0"}, {"id": "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "title": "Age and Gender Estimation of Unfiltered Faces", "year": 2014, "pdf": "http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf"}, {"id": "7d20c8afb1815205bd696c0dd2e4dbcc66ab4d31", "title": "Ordinal Deep Feature Learning for Facial Age Estimation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961736"}, {"id": "d3d71a110f26872c69cf25df70043f7615edcf92", "title": "Learning Compact Feature Descriptor and Adaptive Matching Framework for Face Recognition", "year": 2015, "pdf": "https://www.cise.ufl.edu/~dihong/assets/07094272.pdf"}, {"id": "c05a7c72e679745deab9c9d7d481f7b5b9b36bdd", "title": "Naval Postgraduate School Monterey, California Approved for Public Release; Distribution Is Unlimited Biometric Challenges for Future Deployments: a Study of the Impact of Geography, Climate, Culture, and Social Conditions on the Effective Collection of Biometrics", "year": "2010", "pdf": "https://pdfs.semanticscholar.org/c05a/7c72e679745deab9c9d7d481f7b5b9b36bdd.pdf"}, {"id": "694bdadb720d4237b701a5c8c10417843ed89c6f", "title": "Facial age estimation under the terms of local latency using weighted local binary pattern and multi-layer perceptron", "year": 2016, "pdf": null}, {"id": "10195a163ab6348eef37213a46f60a3d87f289c5", "title": "Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks", "year": 2016, "pdf": "https://doi.org/10.1007/s11263-016-0940-3"}, {"id": "c9c2de3628be7e249722b12911bebad84b567ce6", "title": "Age and gender recognition in the wild with deep attention", "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2017.06.028"}, {"id": "f839ae810338e3b12c8e2f8db6ce4d725738d2d9", "title": "Learning CNNs for Face Recognition from Weakly Annotated Images", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.115"}, {"id": "d9810786fccee5f5affaef59bc58d2282718af9b", "title": "Adaptive Frame Selection for Enhanced Face Recognition in Low-Resolution Videos", "year": 2008, "pdf": 
"http://pdfs.semanticscholar.org/d981/0786fccee5f5affaef59bc58d2282718af9b.pdf"}, {"id": "7c11fa4fd91cb57e6e216117febcdd748e595760", "title": "Discriminant Feature Manifold for Facial Aging Estimation", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597453"}, {"id": "3c0bbfe664fb083644301c67c04a7f1331d9515f", "title": "The Role of Color and Contrast in Facial Age Estimation", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/3c0b/bfe664fb083644301c67c04a7f1331d9515f.pdf"}, {"id": "fffefc1fb840da63e17428fd5de6e79feb726894", "title": "Fine-Grained Age Estimation in the wild with Attention LSTM Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.10445.pdf"}, {"id": "a022eff5470c3446aca683eae9c18319fd2406d5", "title": "Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description s\u00e9mantique des traits visuels humains)", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf"}, {"id": "67d7022462c98e6c5de9f2254b46f0b8d3b92089", "title": "Facial image database mining and classification analysis using different distance metrics", "year": 2017, "pdf": null}, {"id": "0e4baf74dfccef7a99c6954bb0968a2e35315c1f", "title": "Gender identification from face images", "year": 2012, "pdf": "https://doi.org/10.1109/SIU.2012.6204517"}, {"id": "59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1", "title": "International Journal of Computer Application Issue 2, Volume 3 (june 2012) Issn: 2250-1797", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/59cd/afed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1.pdf"}, {"id": "4563b46d42079242f06567b3f2e2f7a80cb3befe", "title": "VADANA: A dense dataset for facial image analysis", "year": 2011, "pdf": "http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf"}, {"id": "a05b1254630257fe27ee195ef05cc50ce6e41f22", "title": "Facial age estimation using hybrid Haar wavelet and color features with Support Vector Regression", "year": 2017, "pdf": null}, {"id": "c398684270543e97e3194674d9cce20acaef3db3", "title": "Comparative Face Soft Biometrics for Human Identification", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c398/684270543e97e3194674d9cce20acaef3db3.pdf"}, {"id": "90ae02da16b750a9fd43f8a38440f848309c2fe0", "title": "A review of facial gender recognition", "year": 2015, "pdf": "https://doi.org/10.1007/s10044-015-0499-6"}, {"id": "b2749caec0094e186d3ee850151c899b8508f47a", "title": "AVIUE — Artificial vision to improve the user experience", "year": 2013, "pdf": null}, {"id": "b7894c1f805ffd90ab4ab06002c70de68d6982ab", "title": "A comprehensive age estimation on face images using hybrid filter based feature extraction", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/5e87/06fab62a5716c30a245e5963f51793e1d0ed.pdf"}, {"id": "9939498315777b40bed9150d8940fc1ac340e8ba", "title": "ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583"}, {"id": "70c2c2d2b7e34ff533a8477eff9763be196cd03a", "title": "Selecting discriminative CLBP patterns for age estimation", "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICMEW.2015.7169755"}, {"id": "070c8ee3876c06f9a65693e536d61097ace40417", "title": "How Do Facial Expressions Contribute to Age Prediction?", "year": 2013, "pdf": "https://doi.org/10.1109/ACPR.2013.161"}, {"id": "e7b6887cd06d0c1aa4902335f7893d7640aef823", "title": "Modelling of Facial Aging and 
Kinship: A Survey", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf"}, {"id": "72c0c8deb9ea6f59fde4f5043bff67366b86bd66", "title": "Age progression in Human Faces : A Survey", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/72c0/c8deb9ea6f59fde4f5043bff67366b86bd66.pdf"}, {"id": "37eb666b7eb225ffdafc6f318639bea7f0ba9a24", "title": "Age, Gender and Race Estimation from Unconstrained Face Images", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/37eb/666b7eb225ffdafc6f318639bea7f0ba9a24.pdf"}, {"id": "574751dbb53777101502419127ba8209562c4758", "title": "Gender classification from unaligned facial images using support subspaces", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/5747/51dbb53777101502419127ba8209562c4758.pdf"}, {"id": "cb004e9706f12d1de83b88c209ac948b137caae0", "title": "Face Aging Effect Simulation Using Hidden Factor Analysis Joint Sparse Representation", "year": "2016", "pdf": "https://arxiv.org/pdf/1511.01186.pdf"}, {"id": "e295c1aa47422eb35123053038e62e9aa50a2e3a", "title": "ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "75650bfc20036d99314f7ddae8f2baecde3d57e2", "title": "Concave Losses for Robust Dictionary Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1711.00659.pdf"}, {"id": "c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8", "title": "Age Estimation Guided Convolutional Neural Network for Age-Invariant Face Recognition", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811"}, {"id": "4aeb87c11fb3a8ad603311c4650040fd3c088832", "title": "Self-paced Mixture of Regressions", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/4aeb/87c11fb3a8ad603311c4650040fd3c088832.pdf"}, {"id": "a6e4f924cf9a12625e85c974f0ed136b43c2f3b5", "title": "A new facial age estimation method using centrally overlapped block based local texture features", "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-4572-6"}, {"id": "d6c8f5674030cf3f5a2f7cc929bad37a422b26a0", "title": "Face Aging Simulation with Deep Convolutional Generative Adversarial Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371"}, {"id": "7c8909da44e89a78fe88e815c83a4ced34f99149", "title": "Multi-classifier Q-stack Aging Model for Adult Face Verification", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.326"}, {"id": "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "title": "Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf"}, {"id": "f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a", "title": "Identification using face regions: application and assessment in forensic scenarios.", "year": "2013", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/24314504"}, {"id": "9755554b13103df634f9b1ef50a147dd02eab02f", "title": "How Transferable Are CNN-Based Features for Age and Gender Classification?", "year": 2016, "pdf": "https://doi.org/10.1109/BIOSIG.2016.7736925"}, {"id": "362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c", "title": "A partial least squares based ranker for 
fast and accurate age estimation", "year": 2016, "pdf": "http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002792.pdf"}, {"id": "1459d4d16088379c3748322ab0835f50300d9a38", "title": "Cross-Domain Visual Matching via Generalized Similarity Measure and Feature Learning", "year": 2017, "pdf": "https://arxiv.org/pdf/1605.04039v1.pdf"}, {"id": "d79530e1745b33f3b771d0b38d090b40afc04191", "title": "A new method to estimate ages of facial image for large database", "year": 2015, "pdf": "https://doi.org/10.1007/s11042-015-2485-9"}, {"id": "414d78e32ac41e6ff8b192bc095fe55f865a02f4", "title": "Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?", "year": "2017", "pdf": null}, {"id": "3d94f81cf4c3a7307e1a976dc6cb7bf38068a381", "title": "Data-Dependent Label Distribution Learning for Age Estimation", "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2655445"}, {"id": "24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852", "title": "Deep Multi-Task Learning for Joint Prediction of Heterogeneous Face Attributes", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.30"}, {"id": "1b27ca161d2e1d4dd7d22b1247acee5c53db5104", "title": "Facial soft biometric features for forensic face recognition.", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/1b27/ca161d2e1d4dd7d22b1247acee5c53db5104.pdf"}, {"id": "6c5fdec4dfddd51babf0fbd1275f2f2fa6bbbff0", "title": "Age-Invariant Face Recognition Using Coupled Similarity Reference Coding", "year": "2018", "pdf": "http://doi.org/10.1007/s11063-018-9930-5"}, {"id": "4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d", "title": "A proposed method for the improvement in biometric facial image recognition using document-based classification", "year": "2018", "pdf": "http://doi.org/10.1007/s11227-018-2408-4"}, {"id": "d82b93f848d5442f82154a6011d26df8a9cd00e7", "title": "Neural Network Based Age Classification Using Linear Wavelet Transforms", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/d82b/93f848d5442f82154a6011d26df8a9cd00e7.pdf"}, {"id": "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984"}, {"id": "2bf03e8fb775718ac9730524a176ddd189c0e457", "title": "DASM: An open source active shape model for automatic registration of objects", "year": 2013, "pdf": null}, {"id": "1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69", "title": "Self-enhancement Influences Medial Frontal Cortex Alpha Power to Social Rejection Feedback", "year": "2014", "pdf": "http://doi.org/10.1162/jocn_a_00645"}, {"id": "0fc5c6f06e40014a56f492172f44c073d269e95c", "title": "Genetic and evolutionary biometrics: Exploring value preference space for hybrid feature weighting and selection", "year": 2013, "pdf": "https://doi.org/10.1108/17563781311301490"}, {"id": "23675cb2180aac466944df0edda4677a77c455cd", "title": "Age Estimation Using AAM and Local Facial Features", "year": 2009, "pdf": "http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.142"}, {"id": "50ff21e595e0ebe51ae808a2da3b7940549f4035", "title": "Age Group and Gender Estimation in the Wild With Deep RoR Architecture", "year": 2017, "pdf": "http://export.arxiv.org/pdf/1710.02985"}, {"id": "dfabe7ef245ca68185f4fcc96a08602ee1afb3f7", "title": "Group-aware deep feature learning for facial age estimation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/dfab/e7ef245ca68185f4fcc96a08602ee1afb3f7.pdf"}, {"id": 
"64ca0dbe60bf8f8243fad73a2494c3fa7a2770e2", "title": "Classification of human age based on Neural Network using FG-NET Aging database and Wavelets", "year": 2012, "pdf": null}, {"id": "bc6de183cd8b2baeebafeefcf40be88468b04b74", "title": "Age Group Recognition using Human Facial Images", "year": "2015", "pdf": "https://pdfs.semanticscholar.org/e057/e713301e089887295543226b79b534fdd145.pdf"}, {"id": "d4288daef6519f6852f59ac6b85e21b8910f2207", "title": "Recurrent Face Aging with Hierarchical AutoRegressive Memory.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994505"}, {"id": "6e12ba518816cbc2d987200c461dc907fd19f533", "title": "A computational approach to body mass index prediction from face images", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/6e12/ba518816cbc2d987200c461dc907fd19f533.pdf"}, {"id": "744d23991a2c48d146781405e299e9b3cc14b731", "title": "Aging Face Recognition: A Hierarchical Learning Model Based on Local Patterns Selection", "year": 2016, "pdf": "https://doi.org/10.1109/TIP.2016.2535284"}, {"id": "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "title": "Do They All Look the Same? Deciphering Chinese, Japanese and Koreans by Fine-Grained Deep Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf"}, {"id": "e0423788eb91772de9d708a17799179cf3230d63", "title": "Age Classification Using an Optimized CNN Architecture", "year": 2017, "pdf": "http://doi.acm.org/10.1145/3093241.3093277"}, {"id": "ff946df1cea6c107b2c336419c34ea69cc3ddbc4", "title": "EGA — Ethnicity, gender and age, a pre-annotated face database", "year": 2012, "pdf": null}, {"id": "dcb44fc19c1949b1eda9abe998935d567498467d", "title": "Ordinal Zero-Shot Learning", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/dcb4/4fc19c1949b1eda9abe998935d567498467d.pdf"}, {"id": "469ee1b00f7bbfe17c698ccded6f48be398f2a44", "title": "SURVEy: Techniques for Aging Problems in Face Recognition", "year": "", "pdf": "http://pdfs.semanticscholar.org/469e/e1b00f7bbfe17c698ccded6f48be398f2a44.pdf"}, {"id": "bcefb15246b1c9cea74a49a4ba1c990b6b97a19c", "title": "Review on the effects of age, gender, and race demographics on automatic face recognition", "year": "2017", "pdf": "http://doi.org/10.1007/s00371-017-1428-z"}, {"id": "c29fe5ed41d2240352fcb8d8196eb2f31d009522", "title": "Age estimation with dynamic age range", "year": "2015", "pdf": "http://doi.org/10.1007/s11042-015-3230-0"}, {"id": "205f3d654b7d28d00d15b034a8c5b2a8740bd8b6", "title": "Discriminant Learning Through Multiple Principal Angles for Visual Recognition", "year": 2012, "pdf": "https://www.researchgate.net/profile/Ya_Su4/publication/51686551_Discriminant_learning_through_multiple_principal_angles_for_visual_recognition/links/00b495253b0057832b000000.pdf"}, {"id": "4e626b2502ee042cf4d7425a8e7a228789b23856", "title": "Aspects of Age Variation in Facial Morphology Affecting Biometrics", "year": 2007, "pdf": null}, {"id": "fc04a50379e08ddde501816eb1f9560c36d01a39", "title": "Image Pre-processing Using OpenCV Library on MORPH-II Face Database", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.06934.pdf"}, {"id": "ef36ca8abf0a23e661f3b1603057963a70e16704", "title": "An effective approach of facial age estimation with extreme learning machine", "year": 2017, "pdf": null}, {"id": "df674dc0fc813c2a6d539e892bfc74f9a761fbc8", "title": "An Image Mining System for Gender Classification & Age Prediction Based on Facial Features", "year": 2013, "pdf": 
"http://pdfs.semanticscholar.org/df67/4dc0fc813c2a6d539e892bfc74f9a761fbc8.pdf"}, {"id": "b22f5f0929704752a16d0f65f00a5161a059d8e3", "title": "On soft biometrics", "year": "2015", "pdf": "http://doi.org/10.1016/j.patrec.2015.08.006"}, {"id": "7195cb08ba2248f3214f5dc5d7881533dd1f46d9", "title": "Age Regression Based on Local Image Features", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5673820"}, {"id": "0b4c4ea4a133b9eab46b217e22bda4d9d13559e6", "title": "MORF: Multi-Objective Random Forests for face characteristic estimation", "year": 2015, "pdf": "http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_morph_random_forests.pdf"}, {"id": "0f112e49240f67a2bd5aaf46f74a924129f03912", "title": "Age-Invariant Face Recognition", "year": 2010, "pdf": "http://www.cse.msu.edu/biometrics/Publications/Face/ParkTongJain_AgeInvariantFaceRecognition_PAMI10.pdf"}, {"id": "ac26166857e55fd5c64ae7194a169ff4e473eb8b", "title": "Personalized Age Progression with Bi-Level Aging Dictionary Learning", "year": "2018", "pdf": "https://arxiv.org/pdf/1706.01039.pdf"}, {"id": "daa120032d8f141bc6aae20e23b1b754a0dd7d5f", "title": "Kernel ELM and CNN Based Facial Age Estimation", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593"}, {"id": "fe866887d3c26ee72590c440ed86ffc80e980293", "title": "Understanding Human Aging Patterns from a Machine Perspective", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397011"}, {"id": "1c93b48abdd3ef1021599095a1a5ab5e0e020dd5", "title": "A Compositional and Dynamic Model for Face Aging", "year": 2010, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2009.39"}, {"id": "166186e551b75c9b5adcc9218f0727b73f5de899", "title": "Automatic Age and Gender Recognition in Human Face Image Dataset using Convolutional Neural Network System", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf"}, {"id": "cce332405ce9cd9dccc45efac26d1d614eaa982d", "title": "A Ranking Approach for Human Ages Estimation Based on Face Images", "year": "2010", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597533"}, {"id": "25bcd5aa3bbe56c992547fba683418655b46fc4a", "title": "Pyramid multi-level features for facial demographic estimation", "year": 2017, "pdf": "https://doi.org/10.1016/j.eswa.2017.03.030"}, {"id": "b972683d702a65d3ee7a25bc931a5890d1072b6b", "title": "Demographic Analysis from Biometric Data: Achievements, Challenges, and New Frontiers", "year": 2018, "pdf": "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "aca273a9350b10b6e2ef84f0e3a327255207d0f5", "title": "On soft biometrics", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/efb2/4d35d8f6a46e1ff3800a2481bc7e681e255e.pdf"}, {"id": "e16efd2ae73a325b7571a456618bfa682b51aef8", "title": "Semi-Supervised Adaptive Label Distribution Learning for Facial Age Estimation", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e16e/fd2ae73a325b7571a456618bfa682b51aef8.pdf"}, {"id": "ec0104286c96707f57df26b4f0a4f49b774c486b", "title": "An Ensemble CNN2ELM for Age Estimation", "year": 2018, "pdf": "http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf"}, {"id": "3c6542295cf7fe362d7d629ac10670bf30cdabce", "title": "Hierarchical Aggregation Based Deep Aging Feature for Age 
Prediction", "year": 2015, "pdf": "https://doi.org/10.1109/DICTA.2015.7371264"}, {"id": "d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9", "title": "Gender estimation based on supervised HOG, Action Units and unsupervised CNN feature extraction", "year": 2017, "pdf": null}, {"id": "4f37f71517420c93c6841beb33ca0926354fa11d", "title": "A hybrid deep learning CNN-ELM for age and gender classification", "year": "2018", "pdf": "http://doi.org/10.1016/j.neucom.2017.08.062"}, {"id": "7ad7897740e701eae455457ea74ac10f8b307bed", "title": "Random Subspace Two-dimensional LDA for Face Recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/7ad7/897740e701eae455457ea74ac10f8b307bed.pdf"}, {"id": "69ff40fd5ce7c3e6db95a2b63d763edd8db3a102", "title": "Human Age Estimation via Geometric and Textural Features", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/69ff/40fd5ce7c3e6db95a2b63d763edd8db3a102.pdf"}, {"id": "b1301c722886b6028d11e4c2084ee96466218be4", "title": "Facial Aging and Rejuvenation by Conditional Multi-Adversarial Autoencoder with Ordinal Regression", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/b130/1c722886b6028d11e4c2084ee96466218be4.pdf"}, {"id": "b5f9306c3207ac12ac761e7d028c78b3009a219c", "title": "Age estimation based on extended non-negative matrix factorization", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6093779"}, {"id": "22532c6e38ded690dc1420f05c18e23f6f24804d", "title": "Chapter 5 Genetic & Evolutionary Biometrics", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2253/2c6e38ded690dc1420f05c18e23f6f24804d.pdf"}, {"id": "a591639bfcabc4091ff556364074c58521159ff9", "title": "General structured sparse learning for human facial age estimation", "year": 2017, "pdf": null}, {"id": "84fd7c00243dc4f0df8ab1a8c497313ca4f8bd7b", "title": "Perceived Age Estimation from Face Images", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/84fd/7c00243dc4f0df8ab1a8c497313ca4f8bd7b.pdf"}, {"id": "68c17aa1ecbff0787709be74d1d98d9efd78f410", "title": "Gender Classification from Face Images Using Mutual Information and Feature Fusion", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/68c1/7aa1ecbff0787709be74d1d98d9efd78f410.pdf"}, {"id": "997c7ebf467c579b55859315c5a7f15c1df43432", "title": "A Study of Convolutional Sparse Feature Learning for Human Age Estimate", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/FG.2017.141"}, {"id": "ed32df6b122b15a52238777c9993ed31107b4bed", "title": "Age face simulation using aging functions on global and local features with residual images", "year": "2017", "pdf": "http://doi.org/10.1016/j.eswa.2017.03.008"}, {"id": "8411fe1142935a86b819f065cd1f879f16e77401", "title": "Facial Recognition using Modified Local Binary Pattern and Random Forest", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/8411/fe1142935a86b819f065cd1f879f16e77401.pdf"}, {"id": "48cfc5789c246c6ad88ff841701204fc9d6577ed", "title": "Age Invariant Face Recognition Based on DCT Feature Extraction and Kernel Fisher Analysis", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/48cf/c5789c246c6ad88ff841701204fc9d6577ed.pdf"}, {"id": "fa23122db319440fb5a7253e19709f992b4571b9", "title": "Human Age Estimation via Geometric and Textural Features", "year": "2012", "pdf": "https://pdfs.semanticscholar.org/fa23/122db319440fb5a7253e19709f992b4571b9.pdf"}, {"id": "12e4545d07e1793df87520f384b37a015815d2f7", "title": "Age invariant face recognition: a survey on facial aging databases, techniques and effect of aging", 
"year": "2018", "pdf": "http://doi.org/10.1007/s10462-018-9661-z"}, {"id": "4919663c62174a9bc0cc7f60da8f96974b397ad2", "title": "Human age estimation using enhanced bio-inspired features (EBIF)", "year": 2010, "pdf": "https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/EBIF_5-2-2010_v_5.pdf"}, {"id": "e9fcd15bcb0f65565138dda292e0c71ef25ea8bb", "title": "Analysing Facial Regions for Face Recognition Using Forensic Protocols", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e9fc/d15bcb0f65565138dda292e0c71ef25ea8bb.pdf"}, {"id": "47a003e6bbfc5bf04a099ca53c67ddfdbea71315", "title": "Q-stack aging model for face verification", "year": 2009, "pdf": "http://www.researchgate.net/profile/Andrzej_Drygajlo/publication/228669241_Q-stack_aging_model_for_face_verification/links/09e4150f7ffb6d3946000000.pdf"}, {"id": "7553fba5c7f73098524fbb58ca534a65f08e91e7", "title": "A Practical Approach for Determination of Human Gender & Age", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/7553/fba5c7f73098524fbb58ca534a65f08e91e7.pdf"}, {"id": "44fb4dcf88eb482e2ab79fd4540caf941613b970", "title": "Perceived Age Estimation under Lighting Condition Change by Covariate Shift Adaptation", "year": 2010, "pdf": "http://www.researchgate.net/profile/Masashi_Sugiyama/publication/220930547_Perceived_Age_Estimation_under_Lighting_Condition_Change_by_Covariate_Shift_Adaptation/links/0fcfd5122b4d406edd000000.pdf"}, {"id": "f47404424270f6a20ba1ba8c2211adfba032f405", "title": "Identification of Face Age range Group using Neural Network", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/f474/04424270f6a20ba1ba8c2211adfba032f405.pdf"}, {"id": "a608c5f8fd42af6e9bd332ab516c8c2af7063c61", "title": "Age Estimation via Grouping and Decision Fusion", "year": 2015, "pdf": "http://mcl.usc.edu/wp-content/uploads/2016/01/Liu-TIFS-2015-10.pdf"}, {"id": "0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e", "title": "Large Age-Gap face verification by feature injection in deep networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/msceleb.json b/site/datasets/unknown/msceleb.json new file mode 100644 index 00000000..fce4f9fe --- /dev/null +++ b/site/datasets/unknown/msceleb.json @@ -0,0 +1 @@ +{"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "paper": {"paper_id": "291265db88023e92bb8c8e6390438e5da148e8f5", "key": "msceleb", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf", "address": "", "name": "MsCeleb"}, "citations": [{"id": "39ed31ced75e6151dde41944a47b4bdf324f922b", "title": "Pose-Guided Photorealistic Face Rotation", "year": "", "pdf": "https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf"}, {"id": "69adf2f122ff18848ff85e8de3ee3b2bc495838e", "title": "Arbitrary Facial Attribute Editing: Only Change What You Want", "year": "2017", "pdf": null}, {"id": "352a620f0b96a7e76b9195a7038d5eec257fd994", "title": "Kinship Classification through Latent Adaptive Subspace", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373823"}, {"id": "571b83f7fc01163383e6ca6a9791aea79cafa7dd", "title": "SeqFace: Make full use of sequence information for face recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.06524.pdf"}, {"id": "c220f457ad0b28886f8b3ef41f012dd0236cd91a", "title": "Crystal Loss and Quality Pooling for Unconstrained Face Verification and Recognition", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/c220/f457ad0b28886f8b3ef41f012dd0236cd91a.pdf"}, {"id": "7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae", "title": "Incremental Classifier Learning with Generative Adversarial Networks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/7f97/a36a5a634c30de5a8e8b2d1c812ca9f971ae.pdf"}, {"id": "fed8cc533037d7d925df572a440fd89f34d9c1fd", "title": "Simple Triplet Loss Based on Intra/Inter-Class Metric Learning for Face Verification", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194"}, {"id": "377c6563f97e76a4dc836a0bd23d7673492b1aae", "title": "Motion deblurring of faces", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/377c/6563f97e76a4dc836a0bd23d7673492b1aae.pdf"}, {"id": "a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d", "title": "High Performance Large Scale Face Recognition with Multi-cognition Softmax and Feature Retrieval", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265434"}, {"id": "cf4e5206722ba16061982b885f8c7c86beacd27c", "title": "Group-Level Emotion Recognition Using Hybrid Deep Models Based on Faces, Scenes, Skeletons and Visual Attentions", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3264990"}, {"id": "8509abbde2f4b42dc26a45cafddcccb2d370712f", "title": "A way to improve precision of face recognition in SIPP without retrain of the deep neural network model", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ad9a/169042d887c33cfcec2716a453a0d3abcb0c.pdf"}, {"id": "3dc522a6576c3475e4a166377cbbf4ba389c041f", "title": "The iNaturalist Challenge 2017 Dataset", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3dc5/22a6576c3475e4a166377cbbf4ba389c041f.pdf"}, {"id": "cb38b4a5e517b4bcb00efbb361f4bdcbcf1dca2c", "title": "Learning towards Minimum Hyperspherical Energy", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.09298.pdf"}, {"id": "d8526863f35b29cbf8ac2ae756eaae0d2930ffb1", "title": "Face Generation for Low-Shot Learning Using Generative Adversarial 
Networks", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265439"}, {"id": "1fc249ec69b3e23856b42a4e591c59ac60d77118", "title": "Evaluation of a 3D-aided pose invariant 2D face recognition system", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272729"}, {"id": "fb3ff56ab12bd250caf8254eca30cd97984a949a", "title": "Face recognition Face2vec based on deep learning: Small database case", "year": 2017, "pdf": "https://doi.org/10.3103/S0146411617010072"}, {"id": "76cd5e43df44e389483f23cb578a9015d1483d70", "title": "Face Verification from Depth using Privileged Information", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/76cd/5e43df44e389483f23cb578a9015d1483d70.pdf"}, {"id": "98b2f21db344b8b9f7747feaf86f92558595990c", "title": "Semantically Decomposing the Latent Spaces of Generative Adversarial Networks", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b9f0/29075a36f15202f0d213fe222dcf237fe65f.pdf"}, {"id": "459e840ec58ef5ffcee60f49a94424eb503e8982", "title": "One-shot Face Recognition by Promoting Underrepresented Classes", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/459e/840ec58ef5ffcee60f49a94424eb503e8982.pdf"}, {"id": "eb526174fa071345ff7b1fad1fad240cd943a6d7", "title": "Deeply vulnerable: a study of the robustness of face recognition to presentation attacks", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/eb52/6174fa071345ff7b1fad1fad240cd943a6d7.pdf"}, {"id": "040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d", "title": "Large-scale Bisample Learning on ID vs. Spot Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03018.pdf"}, {"id": "e3d76f1920c5bf4a60129516abb4a2d8683e48ae", "title": "I Know That Person: Generative Full Body and Face De-identification of People in Images", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907"}, {"id": "282a3ee79a08486f0619caf0ada210f5c3572367", "title": "Accelerated Training for Massive Classification via Dynamic Class Selection", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf"}, {"id": "c808c784237f167c78a87cc5a9d48152579c27a4", "title": "Know You at One Glance: A Compact Vector Representation for Low-Shot Learning", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265437"}, {"id": "48499deeaa1e31ac22c901d115b8b9867f89f952", "title": "Interim Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf"}, {"id": "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "title": "Minimum Margin Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06741.pdf"}, {"id": "c1482491f553726a8349337351692627a04d5dbe", "title": "When Follow is Just One Click Away: Understanding Twitter Follow Behavior in the 2016 U.S. 
Presidential Election", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf"}, {"id": "727d03100d4a8e12620acd7b1d1972bbee54f0e6", "title": "von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04264.pdf"}, {"id": "5121f42de7cb9e41f93646e087df82b573b23311", "title": "Classifying Online Dating Profiles on Tinder using FaceNet Facial Embeddings", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/5121/f42de7cb9e41f93646e087df82b573b23311.pdf"}, {"id": "2770b095613d4395045942dc60e6c560e882f887", "title": "GridFace: Face Rectification via Learning Local Homography Transformations", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.06210.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "332548fd2e52b27e062bd6dcc1db0953ced6ed48", "title": "Low-Shot Face Recognition with Hybrid Classifiers", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265438"}, {"id": "5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9", "title": "Scalable Object Detection for Stylized Objects", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e1dd/1c4de149c6b05eedd1728d57a18a074b9b2a.pdf"}, {"id": "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "title": "Pairwise Relational Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04976.pdf"}, {"id": "0c65226edb466204189b5aec8f1033542e2c17aa", "title": "A study of CNN outside of training conditions", "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296997"}, {"id": "a98316980b126f90514f33214dde51813693fe0d", "title": "Collaborations on YouTube: From Unsupervised Detection to the Impact on Video and Channel Popularity", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.01887.pdf"}, {"id": "c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "6f5309d8cc76d3d300b72745887addd2a2480ba8", "title": "KinNet: Fine-to-Coarse Deep Metric Learning for Kinship Verification", "year": 2017, "pdf": null}, {"id": "7323b594d3a8508f809e276aa2d224c4e7ec5a80", "title": "An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05508.pdf"}, {"id": "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "title": "A Light CNN for Deep Face Representation with Noisy Labels", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf"}, {"id": "fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb", "title": "3D-Aided Dual-Agent GANs for Unconstrained Face Recognition.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/30040629"}, {"id": "e13360cda1ebd6fa5c3f3386c0862f292e4dbee4", "title": "Range Loss for Deep Face Recognition with Long-tail", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/e133/60cda1ebd6fa5c3f3386c0862f292e4dbee4.pdf"}, {"id": "3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f", "title": "Enhancing convolutional neural networks for face recognition with occlusion maps and batch triplet loss", "year": "2018", "pdf": "https://arxiv.org/pdf/1707.07923.pdf"}, {"id": "beab10d1bdb0c95b2f880a81a747f6dd17caa9c2", "title": "DeepDeblur: Fast one-step blurry face images restoration", "year": 2017, 
"pdf": "http://pdfs.semanticscholar.org/beab/10d1bdb0c95b2f880a81a747f6dd17caa9c2.pdf"}, {"id": "3b4da93fbdf7ae520fa00d39ffa694e850b85162", "title": "Face-Voice Matching using Cross-modal Embeddings", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240601"}, {"id": "59fc69b3bc4759eef1347161e1248e886702f8f7", "title": "Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf"}, {"id": "406c5aeca71011fd8f8bd233744a81b53ccf635a", "title": "Scalable softmax loss for face verification", "year": 2017, "pdf": null}, {"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "title": "Do They All Look the Same? Deciphering Chinese, Japanese and Koreans by Fine-Grained Deep Learning", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf"}, {"id": "b4ee64022cc3ccd14c7f9d4935c59b16456067d3", "title": "Unsupervised Cross-Domain Image Generation", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b4ee/64022cc3ccd14c7f9d4935c59b16456067d3.pdf"}, {"id": "31b05f65405534a696a847dd19c621b7b8588263", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484"}, {"id": "f553f8022b1417bc7420523220924b04e3f27b8e", "title": "Finding your Lookalike: Measuring Face Similarity Rather than Face Identity", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05252.pdf"}, {"id": "85860d38c66a5cf2e6ffd6475a3a2ba096ea2920", "title": "Celeb-500K: A Large Training Dataset for Face Recognition", "year": "2018", "pdf": "http://doi.org/10.1109/ICIP.2018.8451704"}, {"id": "cb2470aade8e5630dcad5e479ab220db94ecbf91", "title": "Exploring Facial Differences in European Countries Boundary by Fine-Tuned Neural Networks", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018"}, {"id": "5bf85a60cf7506b0c14d484a2a50f553ae9a45a9", "title": "Conditional Expression Synthesis with Face Parsing Transformation", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240647"}, {"id": "dd8084b2878ca95d8f14bae73e1072922f0cc5da", "title": "Model Distillation with Knowledge Transfer in Face Classification, Alignment and Verification", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.02929.pdf"}, {"id": "d1a43737ca8be02d65684cf64ab2331f66947207", "title": "IJB \u2013 S : IARPA Janus Surveillance Video Benchmark \u2217", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf"}, {"id": "3ac09c2589178dac0b6a2ea2edf04b7629672d81", "title": "Wasserstein CNN: Learning Invariant Features for NIR-VIS Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1708.02412.pdf"}, {"id": "266766818dbc5a4ca1161ae2bc14c9e269ddc490", "title": "Boosting a Low-Cost Smart Home Environment with Usage and Access Control Rules", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2667/66818dbc5a4ca1161ae2bc14c9e269ddc490.pdf"}, {"id": "ba788365d70fa6c907b71a01d846532ba3110e31", "title": "Robust Conditional Generative Adversarial Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.08657.pdf"}, {"id": "11ad162b3165b4353df8d7b4153fb26d6a310d11", "title": 
"Recognizing Families In the Wild (RFIW): Data Challenge Workshop in conjunction with ACM MM 2017", "year": 2017, "pdf": null}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "6d91da37627c05150cb40cac323ca12a91965759", "title": "Gender Politics in the 2016 U.S. Presidential Election: A Computer Vision Approach", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf"}, {"id": "3827f1cab643a57e3cd22fbffbf19dd5e8a298a8", "title": "One-Shot Face Recognition via Generative Learning", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373804"}, {"id": "6afe1f668eea8dfdd43f0780634073ed4545af23", "title": "Deep learning for content-based video retrieval in film and television production", "year": 2017, "pdf": "https://doi.org/10.1007/s11042-017-4962-9"}, {"id": "3933e323653ff27e68c3458d245b47e3e37f52fd", "title": "Evaluation of a 3 D-aided Pose Invariant 2 D Face Recognition System", "year": "2017", "pdf": "https://pdfs.semanticscholar.org/3933/e323653ff27e68c3458d245b47e3e37f52fd.pdf"}, {"id": "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "title": "Git Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08512.pdf"}, {"id": "12ba7c6f559a69fbfaacf61bfb2f8431505b09a0", "title": "DocFace+: ID Document to Selfie Matching", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.05620.pdf"}, {"id": "9a42c519f0aaa68debbe9df00b090ca446d25bc4", "title": "Face Recognition via Centralized Coordinate Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf"}, {"id": "3e3227c8e9f44593d2499f4d1302575c77977b2e", "title": "Facial Expression Recognition Using a Large Out-of-Context Dataset", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347112"}, {"id": "c7c8d150ece08b12e3abdb6224000c07a6ce7d47", "title": "DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1611.05271.pdf"}, {"id": "4209783b0cab1f22341f0600eed4512155b1dee6", "title": "Accurate and Efficient Similarity Search for Large Scale Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.00365.pdf"}, {"id": "7fa4e972da46735971aad52413d17c4014c49e6e", "title": "How to Train Triplet Networks with 100K Identities?", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.02940.pdf"}, {"id": "ad01c5761c89fdf523565cc0dec77b9a6ec8e694", "title": "Global and Local Consistent Wavelet-domain Age Synthesis", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07764.pdf"}, {"id": "8199803f476c12c7f6c0124d55d156b5d91314b6", "title": "The iNaturalist Species Classification and Detection Dataset", "year": "2017", "pdf": "https://arxiv.org/pdf/1707.06642.pdf"}, {"id": "768f6a14a7903099729872e0db231ea814eb05e9", "title": "De-Mark GAN: Removing Dense Watermark with Generative Adversarial Network", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411205"}, {"id": "65984ea40c3b17bb8965c215b61972cd660f61a7", "title": "Doppelganger Mining for Face Representation Learning", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265436"}, {"id": "ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a", "title": "Towards Video Captioning with Naming: A Novel Dataset and a Multi-modal Approach", "year": 2017, "pdf": 
"http://pdfs.semanticscholar.org/ff44/d8938c52cfdca48c80f8e1618bbcbf91cb2a.pdf"}, {"id": "832a9584e85af1675d49ee35fd13283b21ce3a3f", "title": "Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00112.pdf"}, {"id": "069bb452e015ef53f0ef30e9690e460ccc73cf03", "title": "Multicolumn Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09192.pdf"}, {"id": "2e0d56794379c436b2d1be63e71a215dd67eb2ca", "title": "Improving precision and recall of face recognition in SIPP with combination of modified mean search and LSH", "year": "2017", "pdf": "https://arxiv.org/pdf/1709.03872.pdf"}, {"id": "38f1fac3ed0fd054e009515e7bbc72cdd4cf801a", "title": "Finding Person Relations in Image Data of the Internet Archive", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.08246.pdf"}, {"id": "0ed91520390ebdee13a0ac13d028f65d959bdc10", "title": "Hard Example Mining with Auxiliary Embeddings", "year": "", "pdf": "https://pdfs.semanticscholar.org/0ed9/1520390ebdee13a0ac13d028f65d959bdc10.pdf"}, {"id": "436d80cc1b52365ed7b2477c0b385b6fbbb51d3b", "title": "Probabilistic Knowledge Transfer for Deep Representation Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/436d/80cc1b52365ed7b2477c0b385b6fbbb51d3b.pdf"}, {"id": "6ca6ade6c9acb833790b1b4e7ee8842a04c607f7", "title": "Deep Transfer Network for Unconstrained Face Verification", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3234805"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/pipa.json b/site/datasets/unknown/pipa.json new file mode 100644 index 00000000..98e2725e --- /dev/null +++ b/site/datasets/unknown/pipa.json @@ -0,0 +1 @@ +{"id": "0a85bdff552615643dd74646ac881862a7c7072d", "paper": {"paper_id": "0a85bdff552615643dd74646ac881862a7c7072d", "key": "pipa", "title": "Beyond frontal faces: Improving Person Recognition using multiple cues", "year": 2015, "pdf": "https://doi.org/10.1109/CVPR.2015.7299113", "address": "", "name": "PIPA"}, "citations": [{"id": "a1e97c4043d5cc9896dc60ae7ca135782d89e5fc", "title": "Re-identification of Humans in Crowds using Personal, Social and Environmental Constraints", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/a1e9/7c4043d5cc9896dc60ae7ca135782d89e5fc.pdf"}, {"id": "8886b21f97c114a23b24dc7025bbf42885adc3a7", "title": "Privacy protection performance of De-identified face images with and without background", "year": 2016, "pdf": "https://doi.org/10.1109/MIPRO.2016.7522350"}, {"id": "10ab1b48b2a55ec9e2920a5397febd84906a7769", "title": "I-Pic: A Platform for Privacy-Compliant Image Capture", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/10ab/1b48b2a55ec9e2920a5397febd84906a7769.pdf"}, {"id": "ae936628e78db4edb8e66853f59433b8cc83594f", "title": "Person Re-identification via Structured Prediction", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ae93/6628e78db4edb8e66853f59433b8cc83594f.pdf"}, {"id": "036fac2b87cf04c3d93e8a59da618d56a483a97d", "title": "Query Adaptive Late Fusion for Image Retrieval", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.13103.pdf"}, {"id": "ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d", "title": "Improving face verification in photo albums by combining facial recognition and metadata with cross-matching", "year": "2016", "pdf": "https://pdfs.semanticscholar.org/001e/ad9b99ee57af44e1831be1670c40711d348d.pdf"}, {"id": "6f22628d34a486d73c6b46eb071200a00e3abae3", "title": "Learning Pose-Aware Models for Pose-Invariant Face Recognition in the Wild.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/29994497"}, {"id": "d6a9ea9b40a7377c91c705f4c7f206a669a9eea2", "title": "Visual Representations for Fine-grained Categorization", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/d6a9/ea9b40a7377c91c705f4c7f206a669a9eea2.pdf"}, {"id": "a3d8b5622c4b9af1f753aade57e4774730787a00", "title": "Pose-Aware Person Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.10120.pdf"}, {"id": "232f21aa133b75156665175158c65b89bded4032", "title": "Multi-Human Parsing Machines", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240515"}, {"id": "1451e7b11e66c86104f9391b80d9fb422fb11c01", "title": "Image privacy protection with secure JPEG transmorphing", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/1451/e7b11e66c86104f9391b80d9fb422fb11c01.pdf"}, {"id": "2f7e9b45255c9029d2ae97bbb004d6072e70fa79", "title": "cvpaper.challenge in 2015 - A review of CVPR2015 and DeepSurvey", "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2f7e/9b45255c9029d2ae97bbb004d6072e70fa79.pdf"}, {"id": "d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c", "title": "Face Album: Towards automatic photo management based on person identity on mobile phones", "year": 2017, "pdf": "https://doi.org/10.1109/ICASSP.2017.7952713"}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": 2017, "pdf": 
"http://pdfs.semanticscholar.org/28cd/46a078e8fad370b1aba34762a874374513a5.pdf"}, {"id": "c8b9217ee36aebb9735e525b718490dc27c8c1cb", "title": "Improving person recognition by weight adaptation of soft biometrics", "year": 2016, "pdf": null}, {"id": "7bfc7e45967897223b5d9278d0ef29cb2b5789be", "title": "Up to a Limit?: Privacy Concerns of Bystanders and Their Willingness to Share Additional Information with Visually Impaired Users of Assistive Technologies", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3264899"}, {"id": "af4759f5e636b5d9049010d5f0e2b0df2a69cd72", "title": "Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3240509"}, {"id": "2c92839418a64728438c351a42f6dc5ad0c6e686", "title": "Pose-Aware Face Recognition in the Wild", "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf"}, {"id": "cfd4004054399f3a5f536df71f9b9987f060f434", "title": "Person Recognition in Social Media Photos", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.03224.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/pubfig_83.json b/site/datasets/unknown/pubfig_83.json new file mode 100644 index 00000000..d4040d2e --- /dev/null +++ b/site/datasets/unknown/pubfig_83.json @@ -0,0 +1 @@ +{"id": "9c23859ec7313f2e756a3e85575735e0c52249f4", "paper": {"paper_id": "9c23859ec7313f2e756a3e85575735e0c52249f4", "key": "pubfig_83", "title": "Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook", "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788", "address": {"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}, "name": "pubfig83"}, "citations": [{"id": "5d01283474b73a46d80745ad0cc0c4da14aae194", "title": "Classification schemes based on Partial Least Squares for face identification", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5d01/283474b73a46d80745ad0cc0c4da14aae194.pdf"}, {"id": "91e17338a12b5e570907e816bff296b13177971e", "title": "Towards open-set face recognition using hashing functions", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751"}, {"id": "4468afcc523b54d909b2b7e6e747c5c0f3f61c89", "title": "On the role of shape prototypes in hierarchical models of vision", "year": 2013, "pdf": "http://www.santafe.edu/media/workingpapers/13-04-013.pdf"}, {"id": "7bdf4b717371ec9d59f8dff39fb7c1e03b8863e0", "title": "Vehicle License Plate Recognition With Random Convolutional Networks", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2014.52"}, {"id": "ec9e8d69b67bcb2814b538091fa288b6bdbb990f", "title": "GURLS: a Toolbox for Regularized Least Squares Learning", "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ec9e/8d69b67bcb2814b538091fa288b6bdbb990f.pdf"}, {"id": "275b3cb7c780c663eabbf4d6c6cbc8fe24287c70", "title": "The Impact of Bio-Inspired Approaches Toward the Advancement of Face Recognition", "year": 2015, "pdf": "http://doi.acm.org/10.1145/2791121"}, {"id": "5c5dbca68946434afb201f0df90011104c85e4c4", "title": "Robust 3D Patch-Based Face Hallucination", "year": 2017, "pdf": "http://ies.anthropomatik.kit.edu/ies/download/publ/ies_2017_qu_wacv.pdf"}, {"id": "58cb6677b77d5a79fc5b8058829693ca30b36ac5", "title": "Learning Similarity Metrics by Factorising Adjacency Matrices", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/ab10/88f765b5e9180cce71624ca2d1990ebd719c.pdf"}, {"id": "de0aaf8c6b5dea97327e8ef8060d9a708bf564af", "title": "A Benchmark for Iris Location and a Deep Learning Detector Evaluation", "year": "2018", "pdf": "https://arxiv.org/pdf/1803.01250.pdf"}, {"id": "9d839dfc9b6a274e7c193039dfa7166d3c07040b", "title": "Augmented faces", "year": 2011, "pdf": "http://cgit.nutn.edu.tw:8080/cgit/PaperDL/LZJ_120522184630.PDF"}, {"id": "61084a25ebe736e8f6d7a6e53b2c20d9723c4608", "title": "Face recognition for web-scale datasets", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6108/4a25ebe736e8f6d7a6e53b2c20d9723c4608.pdf"}, {"id": "706b9767a444de4fe153b2f3bff29df7674c3161", "title": "Fast Metric Learning For Deep Neural Networks", "year": "2015", "pdf": "https://arxiv.org/pdf/1511.06442.pdf"}, {"id": "7b455cbb320684f78cd8f2443f14ecf5f50426db", "title": "A Fast and Robust Negative Mining Approach for Enrollment in Face Recognition Systems", "year": 2017, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.33"}, {"id": "5e6ba16cddd1797853d8898de52c1f1f44a73279", "title": "Face Identification with Second-Order Pooling", 
"year": 2014, "pdf": "http://pdfs.semanticscholar.org/5e6b/a16cddd1797853d8898de52c1f1f44a73279.pdf"}, {"id": "2ad29b2921aba7738c51d9025b342a0ec770c6ea", "title": "Where is my puppy? Retrieving lost dogs by facial features", "year": 2016, "pdf": "http://arxiv.org/pdf/1510.02781v1.pdf"}, {"id": "88bee9733e96958444dc9e6bef191baba4fa6efa", "title": "Extending Face Identification to Open-Set Face Recognition", "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2014.23"}, {"id": "14fed18d838bf6b89d98837837ff314e61ab7c60", "title": "Deep Learning with Differential Privacy", "year": "2016", "pdf": "https://arxiv.org/pdf/1607.00133.pdf"}, {"id": "2bd49bdfc61788c8ac5621fe7f08a06dd2152fb9", "title": "Pose Invariant Face Recognition Using Neuro - Biologically Inspired Features Pramod", "year": "", "pdf": "http://pdfs.semanticscholar.org/2bd4/9bdfc61788c8ac5621fe7f08a06dd2152fb9.pdf"}, {"id": "befa14324bb71e5d0f30808e54abc970d52f758c", "title": "A Convex Approach for Image Hallucination", "year": 2013, "pdf": "http://pdfs.semanticscholar.org/e7a8/0a088ce0ab6d608d1e719cc1b44caa6a9aa6.pdf"}, {"id": "b29e60ddcabff5002c3ddec135ec94dd991d8d5a", "title": "Compressing deep convolutional neural networks in visual emotion recognition", "year": 2017, "pdf": "http://pdfs.semanticscholar.org/b29e/60ddcabff5002c3ddec135ec94dd991d8d5a.pdf"}, {"id": "e99718d08aca2c49cd2848eebdbb7c7855b4e484", "title": "Deep neural networks and maximum likelihood search for approximate nearest neighbor in video-based image recognition", "year": 2017, "pdf": null}, {"id": "28475d32e92593b426be8ffdc4e852ed2c63affc", "title": "Comparative study between deep learning and bag of visual words for wild-animal recognition", "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7850111"}, {"id": "080936799b4ada10785f0e227a2d10b054473127", "title": "Fast and scalable enrollment for face identification based on Partial Least Squares", "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553714"}, {"id": "288bddfabe739b32721df62d821632e3dafed06a", "title": "Robust multi-image based blind face hallucination", "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_088_ext.pdf"}, {"id": "cc1b7592f29fc3a945a38c742b356167731ef59f", "title": "Image-Set Matching by Two Dimensional Generalized Mutual Subspace Method", "year": 2016, "pdf": "https://doi.org/10.1109/BRACIS.2016.034"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/uccs.json b/site/datasets/unknown/uccs.json new file mode 100644 index 00000000..f8f74114 --- /dev/null +++ b/site/datasets/unknown/uccs.json @@ -0,0 +1 @@ +{"id": "07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1", "paper": {"paper_id": "07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1", "key": "uccs", "title": "Large scale unconstrained open set face database", "year": 2013, "pdf": "http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf", "address": {"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}, "name": "UCCS"}, "citations": [{"id": "63d64d07966d4d128581246a891ff1d3dd3a2a96", "title": "Learning face similarity for re-identification from real surveillance video: A deep metric solution", "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272704"}, {"id": "f3b84a03985de3890b400b68e2a92c0a00afd9d0", "title": "Large Variability Surveillance Camera Face Database", "year": 2015, "pdf": null}]}
\ No newline at end of file diff --git a/site/datasets/unknown/umd_faces.json b/site/datasets/unknown/umd_faces.json new file mode 100644 index 00000000..2ff353ae --- /dev/null +++ b/site/datasets/unknown/umd_faces.json @@ -0,0 +1 @@ +{"id": "31b05f65405534a696a847dd19c621b7b8588263", "paper": {"paper_id": "31b05f65405534a696a847dd19c621b7b8588263", "key": "umd_faces", "title": "UMDFaces: An annotated face dataset for training deep networks", "year": 2017, "pdf": "http://arxiv.org/abs/1611.01484", "address": "", "name": "UMD"}, "citations": [{"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "c220f457ad0b28886f8b3ef41f012dd0236cd91a", "title": "Crystal Loss and Quality Pooling for Unconstrained Face Verification and Recognition", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/c220/f457ad0b28886f8b3ef41f012dd0236cd91a.pdf"}, {"id": "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "title": "A Light CNN for Deep Face Representation with Noisy Labels", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf"}, {"id": "57178b36c21fd7f4529ac6748614bb3374714e91", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217"}, {"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf"}, {"id": "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "title": "Git Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08512.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "2d1f86e2c7ba81392c8914edbc079ac64d29b666", "title": "Deep Heterogeneous Feature Fusion for Template-Based Face Recognition", "year": 2017, "pdf": "https://doi.org/10.1109/WACV.2017.71"}, {"id": "cc47368fe303c6cbda38caf5ac0e1d1c9d7e2a52", "title": "University Classroom Attendance Based on Deep Learning", "year": 2017, "pdf": null}, {"id": "6932baa348943507d992aba75402cfe8545a1a9b", "title": "Stacked Hourglass Network for Robust Facial Landmark Localisation", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014987"}, {"id": "7323b594d3a8508f809e276aa2d224c4e7ec5a80", "title": "An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05508.pdf"}, {"id": "a2e0966f303f38b58b898d388d1c83e40b605262", "title": "ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125"}, {"id": "069bb452e015ef53f0ef30e9690e460ccc73cf03", "title": "Multicolumn Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09192.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "832a9584e85af1675d49ee35fd13283b21ce3a3f", "title": "Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy", "year": "2018", "pdf": 
"https://arxiv.org/pdf/1811.00112.pdf"}, {"id": "c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "377c6563f97e76a4dc836a0bd23d7673492b1aae", "title": "Motion deblurring of faces", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/377c/6563f97e76a4dc836a0bd23d7673492b1aae.pdf"}, {"id": "7b8aa3ebeae17e5266dac23e87f603a5d5f7b1e3", "title": "Open Set Logo Detection and Retrieval", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.10891.pdf"}, {"id": "65984ea40c3b17bb8965c215b61972cd660f61a7", "title": "Doppelganger Mining for Face Representation Learning", "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265436"}, {"id": "4f623e3821d14553b3b286e20910db9225fb723f", "title": "Audio-Visual Person Recognition in Multimedia Data From the Iarpa Janus Program", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122"}, {"id": "e4c3d5d43cb62ac5b57d74d55925bdf76205e306", "title": "Average Biased ReLU Based CNN Descriptor for Improved Face Retrieval", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e4c3/d5d43cb62ac5b57d74d55925bdf76205e306.pdf"}, {"id": "9ea37d031a8f112292c0d0f8d731b837462714e9", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "c69916375fec3c3ebba3aafb31c99a2beb0151c4", "title": "Face recognition in video surveillance from a single reference sample through domain adaptation", "year": "2017", "pdf": null}, {"id": "3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2", "title": "Disentangling 3 D Pose in A Dendritic CNN for Unconstrained 2 D Face Alignment", "year": "2018", "pdf": null}, {"id": "8de1c724a42d204c0050fe4c4b4e81a675d7f57c", "title": "Deep Face Recognition: A Survey", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8614364"}, {"id": "29933de38d72a0941d763b7ac5a480e733ef74a2", "title": "Open Set Logo Detection and Retrieval", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/cc89/b0ff89f5754b36363770eb81e1c8bc6e598a.pdf"}, {"id": "a50fa5048c61209149de0711b5f1b1806b43da00", "title": "Deep Features for Recognizing Disguised Faces in the Wild", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575241"}, {"id": "9b666e20f570387214926eee542965f3fbe3cfce", "title": "Side Information for Face Completion: a Robust PCA Approach", "year": "2018", "pdf": "https://arxiv.org/pdf/1801.07580.pdf"}, {"id": "2306b2a8fba28539306052764a77a0d0f5d1236a", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "944ea33211d67663e04d0181843db634e42cb2ca", "title": "Crystal Loss and Quality Pooling for Unconstrained Face Verification and Recognition.", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.01159.pdf"}]}
\ No newline at end of file diff --git a/site/datasets/unknown/vgg_faces2.json b/site/datasets/unknown/vgg_faces2.json new file mode 100644 index 00000000..26f5ffdd --- /dev/null +++ b/site/datasets/unknown/vgg_faces2.json @@ -0,0 +1 @@ +{"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "paper": {"paper_id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "key": "vgg_faces2", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf", "address": {"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, "name": "VGG Face2"}, "citations": [{"id": "c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706", "title": "Face Recognition: From Traditional to Deep Learning Methods", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00116.pdf"}, {"id": "266766818dbc5a4ca1161ae2bc14c9e269ddc490", "title": "Boosting a Low-Cost Smart Home Environment with Usage and Access Control Rules", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/2667/66818dbc5a4ca1161ae2bc14c9e269ddc490.pdf"}, {"id": "8199803f476c12c7f6c0124d55d156b5d91314b6", "title": "The iNaturalist Species Classification and Detection Dataset", "year": "2017", "pdf": "https://arxiv.org/pdf/1707.06642.pdf"}, {"id": "a2344004f0e1409c0c9473d071a5cfd74bff0a5d", "title": "Learnable PINs: Cross-modal Embeddings for Person Identity", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00833.pdf"}, {"id": "1dd3faf5488751c9de10977528ab96be24616138", "title": "Detecting Anomalous Faces with 'No Peeking' Autoencoders", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/1dd3/faf5488751c9de10977528ab96be24616138.pdf"}, {"id": "9a42c519f0aaa68debbe9df00b090ca446d25bc4", "title": "Face Recognition via Centralized Coordinate Learning", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf"}, {"id": "f616c433671302eff23923d38ea87223202818f6", "title": "Model-Reuse Attacks on Deep Learning Systems", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3243757"}, {"id": "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "title": "Pairwise Relational Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.04976.pdf"}, {"id": "85860d38c66a5cf2e6ffd6475a3a2ba096ea2920", "title": "Celeb-500K: A Large Training Dataset for Face Recognition", "year": "2018", "pdf": "http://doi.org/10.1109/ICIP.2018.8451704"}, {"id": "5c315aae464602115674716a7f976c4992fcb98e", "title": "Teachers\u2019 Perception in the Classroom", "year": "", "pdf": "https://pdfs.semanticscholar.org/5c31/5aae464602115674716a7f976c4992fcb98e.pdf"}, {"id": "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "title": "Minimum Margin Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.06741.pdf"}, {"id": "692aecba13add2b8c1d82db303f5b2ec743ceb44", "title": "FaceForensics: A Large-scale Video Dataset for Forgery Detection in Human Faces", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/692a/ecba13add2b8c1d82db303f5b2ec743ceb44.pdf"}, {"id": "9ebe5d78163a91239f10c453d76082dfa329851d", "title": "Teacher's Perception in the Classroom", "year": "2018", "pdf": "https://arxiv.org/pdf/1805.08897.pdf"}, {"id": "be72b20247fb4dc4072d962ced77ed89aa40372f", "title": "Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.07718.pdf"}, {"id": "bc15e0ebe7ff84e090aa2d74d753d87906d497f7", "title": 
"The Impact of Preprocessing on Deep Representations for Iris Recognition on Unconstrained Environments", "year": "2018", "pdf": "https://arxiv.org/pdf/1808.10032.pdf"}, {"id": "069bb452e015ef53f0ef30e9690e460ccc73cf03", "title": "Multicolumn Networks for Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09192.pdf"}, {"id": "040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d", "title": "Large-scale Bisample Learning on ID vs. Spot Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1806.03018.pdf"}, {"id": "06bd34951305d9f36eb29cf4532b25272da0e677", "title": "A Fast and Accurate System for Face Detection, Identification, and Verification", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07586.pdf"}, {"id": "fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb", "title": "3D-Aided Dual-Agent GANs for Unconstrained Face Recognition.", "year": "2018", "pdf": "https://www.ncbi.nlm.nih.gov/pubmed/30040629"}, {"id": "39ed31ced75e6151dde41944a47b4bdf324f922b", "title": "Pose-Guided Photorealistic Face Rotation", "year": "", "pdf": "https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf"}, {"id": "e4a1b46b5c639d433d21b34b788df8d81b518729", "title": "Side Information for Face Completion: a Robust PCA Approach", "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e4a1/b46b5c639d433d21b34b788df8d81b518729.pdf"}, {"id": "17c0d99171efc957b88c31a465c59485ab033234", "title": "To learn image super-resolution, use a GAN to learn how to do image degradation first", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11458.pdf"}, {"id": "12ba7c6f559a69fbfaacf61bfb2f8431505b09a0", "title": "DocFace+: ID Document to Selfie Matching", "year": "2018", "pdf": "https://arxiv.org/pdf/1809.05620.pdf"}, {"id": "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "title": "Git Loss for Deep Face Recognition", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.08512.pdf"}, {"id": "48fb35946641351f7480a5b88567aae59e526d82", "title": "Generating faces for affect analysis", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.05027.pdf"}, {"id": "c866a2afc871910e3282fd9498dce4ab20f6a332", "title": "Surveillance Face Recognition Challenge", "year": "2018", "pdf": "https://arxiv.org/pdf/1804.09691.pdf"}, {"id": "cf4e5206722ba16061982b885f8c7c86beacd27c", "title": "Group-Level Emotion Recognition Using Hybrid Deep Models Based on Faces, Scenes, Skeletons and Visual Attentions", "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3264990"}, {"id": "2edf55ebc88e89c4caff0c49c6b8e79f46407d19", "title": "Pruning Deep Neural Networks using Partial Least Squares", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.07610.pdf"}, {"id": "173657da03e3249f4e47457d360ab83b3cefbe63", "title": "HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf"}, {"id": "ac5ab8f71edde6d1a2129da12d051ed03a8446a1", "title": "Comparator Networks", "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11440.pdf"}, {"id": "5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd", "title": "Supervised COSMOS Autoencoder: Learning Beyond the Euclidean Loss!", "year": "2018", "pdf": "https://arxiv.org/pdf/1810.06221.pdf"}, {"id": "59fc69b3bc4759eef1347161e1248e886702f8f7", "title": "Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition", "year": "2018", "pdf": "https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf"}, {"id": 
"94f74c6314ffd02db581e8e887b5fd81ce288dbf", "title": "A Light CNN for Deep Face Representation with Noisy Labels", "year": 2015, "pdf": "http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf"}, {"id": "832a9584e85af1675d49ee35fd13283b21ce3a3f", "title": "Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy", "year": "2018", "pdf": "https://arxiv.org/pdf/1811.00112.pdf"}, {"id": "b7ec41005ce4384e76e3be854ecccd564d2f89fb", "title": "Granular Computing and Sequential Analysis of Deep Embeddings in Fast Still-to-Video Face Recognition", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009"}]}
\ No newline at end of file diff --git a/webpack.geocode.dev.js b/webpack.geocode.dev.js new file mode 100644 index 00000000..7ebd43ac --- /dev/null +++ b/webpack.geocode.dev.js @@ -0,0 +1,59 @@ +require('dotenv').config() + +// const HtmlWebpackPlugin = require('html-webpack-plugin') +// const CleanWebpackPlugin = require('clean-webpack-plugin') +const webpack = require('webpack') +const path = require('path') + +module.exports = { + entry: { + main: './scraper/client/index.js' + }, + output: { + path: path.resolve(__dirname, 'scraper/reports'), + filename: 'geocode-app.js' + }, + devtool: 'inline-source-map', + plugins: [ + // new CleanWebpackPlugin(['dist']), + new webpack.DefinePlugin({ + 'process.env.NODE_ENV': '"development"', + 'process.env.S3_HOST': '"' + process.env.S3_HOST + '"', + 'process.env.API_HOST': '""', + }), + // new HtmlWebpackPlugin({ + // title: 'VFrame Metadata', + // meta: { + // viewport: 'width=device-width,initial-scale=1.0' + // } + // }), + // new webpack.HotModuleReplacementPlugin() + ], + module: { + rules: [ + { + test: /\.css$/, + use: ['style-loader', 'css-loader'] + }, + { + test: /\.js$/, + // include: path.resolve(__dirname, 'client'), + exclude: /(node_modules|bower_components|build)/, + use: { + loader: 'babel-loader', + options: { + presets: ['env'], + plugins: [ + require('babel-plugin-transform-runtime'), + require('babel-plugin-transform-es2015-arrow-functions'), + require('babel-plugin-transform-object-rest-spread'), + require('babel-plugin-transform-class-properties'), + require('babel-plugin-transform-react-jsx'), + require('react-hot-loader/babel') + ] + } + } + } + ] + } +}; |
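Note, not part of the diff above: webpack.geocode.dev.js only defines the build, so a minimal sketch of driving it from Node for a one-off development build follows. It assumes the standard webpack Node API (webpack(config, callback)) for whatever webpack version this repo pins; the build-geocode.js filename is hypothetical and not part of this commit.

// build-geocode.js (hypothetical helper, not in this commit): bundles
// scraper/client/index.js into scraper/reports/geocode-app.js using the
// dev config added above.
const webpack = require('webpack')
const config = require('./webpack.geocode.dev.js')

webpack(config, (err, stats) => {
  if (err || stats.hasErrors()) {
    // err covers fatal config/loader failures; stats carries per-module errors.
    console.error(err || stats.toString({ colors: true }))
    process.exit(1)
  }
  console.log(stats.toString({ colors: true, modules: false }))
})

Equivalently, the same config can be handed to the CLI with webpack --config webpack.geocode.dev.js.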

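Note, not part of the diff above: every site/datasets/**/*.json file added in this commit shares one shape, a top-level id, a paper object (paper_id, key, title, year, pdf, address, name), and a citations array of {id, title, year, pdf} records, where year appears as either a number or a string and pdf may be null. A small hypothetical reader, only to illustrate that shape (the summarize helper and script name are assumptions, not code from this repo):

// summarize-dataset.js (hypothetical): counts one dataset's citations per year.
const fs = require('fs')

function summarize(path) {
  const dataset = JSON.parse(fs.readFileSync(path, 'utf8'))
  const byYear = {}
  dataset.citations.forEach(c => {
    // Years are stored inconsistently (2015 vs "2018"); normalize to strings.
    const year = String(c.year || 'unknown')
    byYear[year] = (byYear[year] || 0) + 1
  })
  console.log(dataset.paper.name + ': ' + dataset.paper.title)
  console.log(byYear)
}

summarize('site/datasets/unknown/vgg_faces2.json')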