Diffstat (limited to 'reports/institutions_missing.html')
-rw-r--r--  reports/institutions_missing.html | 2679
1 file changed, 2527 insertions(+), 152 deletions(-)
diff --git a/reports/institutions_missing.html b/reports/institutions_missing.html
index 6266cffe..93a26238 100644
--- a/reports/institutions_missing.html
+++ b/reports/institutions_missing.html
@@ -1,4 +1,7 @@
-<!doctype html><html><head><title>Institutions</title><link rel='stylesheet' href='reports.css'></head><body><h2>Institutions</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>61084a25ebe736e8f6d7a6e53b2c20d9723c4608</td><td></td></tr><tr><td>614a7c42aae8946c7ad4c36b53290860f6256441</td><td>1
+<!doctype html><html><head><title>Institutions</title><link rel='stylesheet' href='reports.css'></head><body><h2>Institutions</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>61084a25ebe736e8f6d7a6e53b2c20d9723c4608</td><td></td></tr><tr><td>61f04606528ecf4a42b49e8ac2add2e9f92c0def</td><td>Deep Deformation Network for Object Landmark
+<br/>Localization
+<br/>NEC Laboratories America, Department of Media Analytics
+</td></tr><tr><td>614a7c42aae8946c7ad4c36b53290860f6256441</td><td>1
<br/>Joint Face Detection and Alignment using
<br/>Multi-task Cascaded Convolutional Networks
</td></tr><tr><td>0d88ab0250748410a1bc990b67ab2efb370ade5d</td><td>Author(s) :
@@ -24,7 +27,13 @@
</td></tr><tr><td>0dd72887465046b0f8fc655793c6eaaac9c03a3d</td><td>Real-time Head Orientation from a Monocular
<br/>Camera using Deep Neural Network
<br/>KAIST, Republic of Korea
-</td></tr><tr><td>0d087aaa6e2753099789cd9943495fbbd08437c0</td><td></td></tr><tr><td>0d8415a56660d3969449e77095be46ef0254a448</td><td></td></tr><tr><td>0d735e7552af0d1dcd856a8740401916e54b7eee</td><td></td></tr><tr><td>0d06b3a4132d8a2effed115a89617e0a702c957a</td><td></td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td></td></tr><tr><td>956317de62bd3024d4ea5a62effe8d6623a64e53</td><td>Lighting Analysis and Texture Modification of 3D Human
+</td></tr><tr><td>0d087aaa6e2753099789cd9943495fbbd08437c0</td><td></td></tr><tr><td>0d8415a56660d3969449e77095be46ef0254a448</td><td></td></tr><tr><td>0d735e7552af0d1dcd856a8740401916e54b7eee</td><td></td></tr><tr><td>0d06b3a4132d8a2effed115a89617e0a702c957a</td><td></td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td></td></tr><tr><td>0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a</td><td>Detection and Tracking of Faces in Videos: A Review
+<br/>© 2016 IJEDR | Volume 4, Issue 2 | ISSN: 2321-9939
+<br/>of Related Work
+<br/>1Student, 2Assistant Professor
+<br/>1, 2Dept. of Electronics & Comm., S S I E T, Punjab, India
+<br/>________________________________________________________________________________________________________
+</td></tr><tr><td>956317de62bd3024d4ea5a62effe8d6623a64e53</td><td>Lighting Analysis and Texture Modification of 3D Human
<br/>Face Scans
<br/>Author
<br/>Zhang, Paul, Zhao, Sanqiang, Gao, Yongsheng
@@ -54,12 +63,23 @@
<br/>Generalized Zero-Shot Learning for Action
<br/>Recognition with Web-Scale Video Data
<br/>Received: date / Accepted: date
+</td></tr><tr><td>59fc69b3bc4759eef1347161e1248e886702f8f7</td><td>Final Report of Final Year Project
+<br/>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>3035141841
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
</td></tr><tr><td>59bfeac0635d3f1f4891106ae0262b81841b06e4</td><td>Face Verification Using the LARK Face
<br/>Representation
</td></tr><tr><td>590628a9584e500f3e7f349ba7e2046c8c273fcf</td><td></td></tr><tr><td>59eefa01c067a33a0b9bad31c882e2710748ea24</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
<br/>Fast Landmark Localization
<br/>with 3D Component Reconstruction and CNN for
<br/>Cross-Pose Recognition
+</td></tr><tr><td>5945464d47549e8dcaec37ad41471aa70001907f</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Every Moment Counts: Dense Detailed Labeling of Actions in Complex
+<br/>Videos
+<br/>Received: date / Accepted: date
</td></tr><tr><td>59c9d416f7b3d33141cc94567925a447d0662d80</td><td>Universität des Saarlandes
<br/>Max-Planck-Institut für Informatik
<br/>AG5
@@ -92,11 +112,18 @@
</td></tr><tr><td>923ede53b0842619831e94c7150e0fc4104e62f7</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
<br/>1293
<br/>ICASSP 2016
+</td></tr><tr><td>92b61b09d2eed4937058d0f9494d9efeddc39002</td><td>Under review in IJCV manuscript No.
+<br/>(will be inserted by the editor)
+<br/>BoxCars: Improving Vehicle Fine-Grained Recognition using
+<br/>3D Bounding Boxes in Traffic Surveillance
+<br/>Received: date / Accepted: date
</td></tr><tr><td>920a92900fbff22fdaaef4b128ca3ca8e8d54c3e</td><td>LEARNING PATTERN TRANSFORMATION MANIFOLDS WITH PARAMETRIC ATOM
<br/>SELECTION
<br/>Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
<br/>Signal Processing Laboratory (LTS4)
<br/>Switzerland-1015 Lausanne
+</td></tr><tr><td>9207671d9e2b668c065e06d9f58f597601039e5e</td><td>Face Detection Using a 3D Model on
+<br/>Face Keypoints
</td></tr><tr><td>9282239846d79a29392aa71fc24880651826af72</td><td>Antonakos et al. EURASIP Journal on Image and Video Processing 2014, 2014:14
<br/>http://jivp.eurasipjournals.com/content/2014/1/14
<br/>RESEARCH
@@ -184,6 +211,17 @@
</td></tr><tr><td>0c75c7c54eec85e962b1720755381cdca3f57dfb</td><td>2212
<br/>Face Landmark Fitting via Optimized Part
<br/>Mixtures and Cascaded Deformable Model
+</td></tr><tr><td>0ca36ecaf4015ca4095e07f0302d28a5d9424254</td><td>Improving Bag-of-Visual-Words Towards Effective Facial Expressive
+<br/>Image Classification
+<br/>1Univ. Grenoble Alpes, CNRS, Grenoble INP∗ , GIPSA-lab, 38000 Grenoble, France
+<br/>Keywords:
+<br/>BoVW, k-means++, Relative Conjunction Matrix, SIFT, Spatial Pyramids, TF.IDF.
+</td></tr><tr><td>0cfca73806f443188632266513bac6aaf6923fa8</td><td>Predictive Uncertainty in Large Scale Classification
+<br/>using Dropout - Stochastic Gradient Hamiltonian
+<br/>Monte Carlo.
+<br/>Vergara, Diego∗1, Hern´andez, Sergio∗2, Valdenegro-Toro, Mat´ıas∗∗3 and Jorquera, Felipe∗4.
+<br/>∗Laboratorio de Procesamiento de Informaci´on Geoespacial, Universidad Cat´olica del Maule, Chile.
+<br/>∗∗German Research Centre for Artificial Intelligence, Bremen, Germany.
</td></tr><tr><td>0c54e9ac43d2d3bab1543c43ee137fc47b77276e</td><td></td></tr><tr><td>0c5afb209b647456e99ce42a6d9d177764f9a0dd</td><td>97
<br/>Recognizing Action Units for
<br/>Facial Expression Analysis
@@ -199,20 +237,29 @@
</td></tr><tr><td>0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d</td><td>SUBMITTED TO JOURNAL
<br/>Weakly Supervised PatchNets: Describing and
<br/>Aggregating Local Patches for Scene Recognition
-</td></tr><tr><td>0c60eebe10b56dbffe66bb3812793dd514865935</td><td></td></tr><tr><td>660b73b0f39d4e644bf13a1745d6ee74424d4a16</td><td></td></tr><tr><td>66d512342355fb77a4450decc89977efe7e55fa2</td><td>Under review as a conference paper at ICLR 2018
+</td></tr><tr><td>0c60eebe10b56dbffe66bb3812793dd514865935</td><td></td></tr><tr><td>6601a0906e503a6221d2e0f2ca8c3f544a4adab7</td><td>SRTM-2 2/9/06 3:27 PM Page 321
+<br/>Detection of Ancient Settlement Mounds:
+<br/>Archaeological Survey Based on the
+<br/>SRTM Terrain Model
+<br/>B.H. Menze, J.A. Ur, and A.G. Sherratt
+</td></tr><tr><td>660b73b0f39d4e644bf13a1745d6ee74424d4a16</td><td></td></tr><tr><td>66d512342355fb77a4450decc89977efe7e55fa2</td><td>Under review as a conference paper at ICLR 2018
<br/>LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
<br/>INATIVE AND MINIMUM INFORMATION LOSS PRIORS
<br/>Anonymous authors
<br/>Paper under double-blind review
</td></tr><tr><td>6643a7feebd0479916d94fb9186e403a4e5f7cbf</td><td>Chapter 8
<br/>3D Face Recognition
+</td></tr><tr><td>661ca4bbb49bb496f56311e9d4263dfac8eb96e9</td><td>Datasheets for Datasets
+</td></tr><tr><td>66d087f3dd2e19ffe340c26ef17efe0062a59290</td><td>Dog Breed Identification
+<br/>Brian Mittl
+<br/>Vijay Singh
</td></tr><tr><td>66a2c229ac82e38f1b7c77a786d8cf0d7e369598</td><td>Proceedings of the 2016 Industrial and Systems Engineering Research Conference
<br/>H. Yang, Z. Kong, and MD Sarder, eds.
<br/>A Probabilistic Adaptive Search System
<br/>for Exploring the Face Space
<br/>Escuela Superior Politecnica del Litoral (ESPOL)
<br/>Guayaquil-Ecuador
-</td></tr><tr><td>66886997988358847615375ba7d6e9eb0f1bb27f</td><td></td></tr><tr><td>66a9935e958a779a3a2267c85ecb69fbbb75b8dc</td><td>FAST AND ROBUST FIXED-RANK MATRIX RECOVERY
+</td></tr><tr><td>66886997988358847615375ba7d6e9eb0f1bb27f</td><td></td></tr><tr><td>66837add89caffd9c91430820f49adb5d3f40930</td><td></td></tr><tr><td>66a9935e958a779a3a2267c85ecb69fbbb75b8dc</td><td>FAST AND ROBUST FIXED-RANK MATRIX RECOVERY
<br/>Fast and Robust Fixed-Rank Matrix
<br/>Recovery
<br/>Antonio Lopez
@@ -378,7 +425,9 @@
<br/>sive review of both approaches is given in [5].
</td></tr><tr><td>3edb0fa2d6b0f1984e8e2c523c558cb026b2a983</td><td>Automatic Age Estimation Based on
<br/>Facial Aging Patterns
-</td></tr><tr><td>3ee7a8107a805370b296a53e355d111118e96b7c</td><td></td></tr><tr><td>3ea8a6dc79d79319f7ad90d663558c664cf298d4</td><td></td></tr><tr><td>3e4f84ce00027723bdfdb21156c9003168bc1c80</td><td>1979
+</td></tr><tr><td>3ee7a8107a805370b296a53e355d111118e96b7c</td><td></td></tr><tr><td>3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b</td><td>Deep Value Networks Learn to
+<br/>Evaluate and Iteratively Refine Structured Outputs
+</td></tr><tr><td>3ea8a6dc79d79319f7ad90d663558c664cf298d4</td><td></td></tr><tr><td>3e4f84ce00027723bdfdb21156c9003168bc1c80</td><td>1979
<br/>© EURASIP, 2011 - ISSN 2076-1465
<br/>19th European Signal Processing Conference (EUSIPCO 2011)
<br/>INTRODUCTION
@@ -419,7 +468,7 @@
<br/>K.U.Leuven, Belgium
<br/>Dept. of Computer Science
<br/>K.U.Leuven, Belgium
-</td></tr><tr><td>50d15cb17144344bb1879c0a5de7207471b9ff74</td><td>Divide, Share, and Conquer: Multi-task
+</td></tr><tr><td>50a0930cb8cc353e15a5cb4d2f41b365675b5ebf</td><td></td></tr><tr><td>50d15cb17144344bb1879c0a5de7207471b9ff74</td><td>Divide, Share, and Conquer: Multi-task
<br/>Attribute Learning with Selective Sharing
</td></tr><tr><td>5028c0decfc8dd623c50b102424b93a8e9f2e390</td><td>Published as a conference paper at ICLR 2017
<br/>REVISITING CLASSIFIER TWO-SAMPLE TESTS
@@ -433,7 +482,9 @@
<br/>Part and Attribute Discovery from Relative Annotations
<br/>Received: 25 February 2013 / Accepted: 14 March 2014 / Published online: 26 April 2014
<br/>© Springer Science+Business Media New York 2014
-</td></tr><tr><td>68a3f12382003bc714c51c85fb6d0557dcb15467</td><td></td></tr><tr><td>68d4056765c27fbcac233794857b7f5b8a6a82bf</td><td>Example-Based Face Shape Recovery Using the
+</td></tr><tr><td>68d2afd8c5c1c3a9bbda3dd209184e368e4376b9</td><td>Representation Learning by Rotating Your Faces
+</td></tr><tr><td>68a3f12382003bc714c51c85fb6d0557dcb15467</td><td></td></tr><tr><td>68d08ed9470d973a54ef7806318d8894d87ba610</td><td>Drive Video Analysis for the Detection of Traffic Near-Miss Incidents
+</td></tr><tr><td>68caf5d8ef325d7ea669f3fb76eac58e0170fff0</td><td></td></tr><tr><td>68d4056765c27fbcac233794857b7f5b8a6a82bf</td><td>Example-Based Face Shape Recovery Using the
<br/>Zenith Angle of the Surface Normal
<br/>Mario Castel´an1, Ana J. Almaz´an-Delf´ın2, Marco I. Ram´ırez-Sosa-Mor´an3,
<br/>and Luz A. Torres-M´endez1
@@ -441,6 +492,9 @@
<br/>2 Universidad Veracruzana, Facultad de F´ısica e Inteligencia Artificial, Xalapa 91000,
<br/>3 ITESM, Campus Saltillo, Saltillo 25270, Coahuila, M´exico
<br/>Veracruz, M´exico
+</td></tr><tr><td>684f5166d8147b59d9e0938d627beff8c9d208dd</td><td>IEEE TRANS. NNLS, JUNE 2017
+<br/>Discriminative Block-Diagonal Representation
+<br/>Learning for Image Recognition
</td></tr><tr><td>68cf263a17862e4dd3547f7ecc863b2dc53320d8</td><td></td></tr><tr><td>68e9c837431f2ba59741b55004df60235e50994d</td><td>Detecting Faces Using Region-based Fully
<br/>Convolutional Networks
<br/>Tencent AI Lab, China
@@ -833,7 +887,12 @@
<br/>[8 of 21] T. Boult and W. Scheirer. Long range facial image acquisition and quality. In M. Tisarelli, S. Li, and R. Chellappa.
<br/>[15 of 21] N. Pinto, J. J. DiCarlo, and D. D. Cox. How far can you get with a modern face recognition test set using only simple features? In IEEE CVPR, 2009.
<br/>[18 of 21] T. Sim, S. Baker, and M. Bsat. The CMU Pose, Illumination and Expression (PIE) Database. In Proceedings of the IEEE F&G, May 2002.
-</td></tr><tr><td>5721216f2163d026e90d7cd9942aeb4bebc92334</td><td></td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td></td></tr><tr><td>57d37ad025b5796457eee7392d2038910988655a</td><td>GEERATVEEETATF
+</td></tr><tr><td>5721216f2163d026e90d7cd9942aeb4bebc92334</td><td></td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td></td></tr><tr><td>574ad7ef015995efb7338829a021776bf9daaa08</td><td>AdaScan: Adaptive Scan Pooling in Deep Convolutional Neural Networks
+<br/>for Human Action Recognition in Videos
+<br/>1IIT Kanpur‡
+<br/>2SRI International
+<br/>3UCSD
+</td></tr><tr><td>57d37ad025b5796457eee7392d2038910988655a</td><td>GEERATVEEETATF
<br/>
<br/>by
<br/>DagaEha
@@ -942,9 +1001,17 @@
<br/>Universit´e catholique de Louvain, B-1348 Belgium,
<br/>2 IDIAP, CH-1920 Martigny,
<br/>Switzerland
+</td></tr><tr><td>6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb</td><td>Low Resolution Face Recognition Using a
+<br/>Two-Branch Deep Convolutional Neural Network
+<br/>Architecture
</td></tr><tr><td>6f288a12033fa895fb0e9ec3219f3115904f24de</td><td>Learning Expressionlets via Universal Manifold
<br/>Model for Dynamic Facial Expression Recognition
-</td></tr><tr><td>6f2dc51d607f491dbe6338711c073620c85351ac</td><td></td></tr><tr><td>6f75697a86d23d12a14be5466a41e5a7ffb79fad</td><td></td></tr><tr><td>6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81</td><td>Structured Output SVM Prediction of Apparent Age,
+</td></tr><tr><td>6f2dc51d607f491dbe6338711c073620c85351ac</td><td></td></tr><tr><td>6f75697a86d23d12a14be5466a41e5a7ffb79fad</td><td></td></tr><tr><td>6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd</td><td>Pages 51.1-51.12
+<br/>DOI: https://dx.doi.org/10.5244/C.30.51
+</td></tr><tr><td>6f7a8b3e8f212d80f0fb18860b2495be4c363eac</td><td>Creating Capsule Wardrobes from Fashion Images
+<br/>UT-Austin
+<br/>UT-Austin
+</td></tr><tr><td>6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81</td><td>Structured Output SVM Prediction of Apparent Age,
<br/>Gender and Smile From Deep Features
<br/>Michal Uˇriˇc´aˇr
<br/>CMP, Dept. of Cybernetics
@@ -975,12 +1042,20 @@
</td></tr><tr><td>6fe2efbcb860767f6bb271edbb48640adbd806c3</td><td>SOFT BIOMETRICS: HUMAN IDENTIFICATION USING COMPARATIVE DESCRIPTIONS
<br/>Soft Biometrics; Human Identification using
<br/>Comparative Descriptions
+</td></tr><tr><td>6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae</td><td>DAISEE: Dataset for Affective States in
+<br/>E-Learning Environments
+<br/>1 Microsoft India R&D Pvt. Ltd.
+<br/>2 Department of Computer Science, IIT Hyderabad
</td></tr><tr><td>6f5151c7446552fd6a611bf6263f14e729805ec7</td><td>5KHHAO /7 %:0 7
<br/>)>IJH=?J 9EJDE JDA ?JANJ B=?A ANFHAIIE ?=IIE?=JE KIEC JDA
<br/>FH>=>EEJEAI JD=J A=?D A B IALAH= ?O ??KHHEC )7 CHKFI EI
<br/>?=IIIAF=H=>EEJO MAECDJEC
<br/>/=>H M=LAAJI H FHE?EF= ?FAJI ==OIEI 2+) ! 1 JDEI F=FAH MA
-</td></tr><tr><td>03d9ccce3e1b4d42d234dba1856a9e1b28977640</td><td></td></tr><tr><td>03f7041515d8a6dcb9170763d4f6debd50202c2b</td><td>Clustering Millions of Faces by Identity
+</td></tr><tr><td>03c56c176ec6377dddb6a96c7b2e95408db65a7a</td><td>A Novel Geometric Framework on Gram Matrix
+<br/>Trajectories for Human Behavior Understanding
+</td></tr><tr><td>03d9ccce3e1b4d42d234dba1856a9e1b28977640</td><td></td></tr><tr><td>0322e69172f54b95ae6a90eb3af91d3daa5e36ea</td><td>Face Classification using Adjusted Histogram in
+<br/>Grayscale
+</td></tr><tr><td>03f7041515d8a6dcb9170763d4f6debd50202c2b</td><td>Clustering Millions of Faces by Identity
</td></tr><tr><td>038ce930a02d38fb30d15aac654ec95640fe5cb0</td><td>Approximate Structured Output Learning for Constrained Local
<br/>Models with Application to Real-time Facial Feature Detection and
<br/>Tracking on Low-power Devices
@@ -1195,8 +1270,10 @@
<br/>approaches,
<br/>1991)
<br/>and
-</td></tr><tr><td>9bcfadd22b2c84a717c56a2725971b6d49d3a804</td><td>How to Detect a Loss of Attention in a Tutoring System
+</td></tr><tr><td>9bc01fa9400c231e41e6a72ec509d76ca797207c</td><td></td></tr><tr><td>9bcfadd22b2c84a717c56a2725971b6d49d3a804</td><td>How to Detect a Loss of Attention in a Tutoring System
<br/>using Facial Expressions and Gaze Direction
+</td></tr><tr><td>9bac481dc4171aa2d847feac546c9f7299cc5aa0</td><td>Matrix Product State for Higher-Order Tensor
+<br/>Compression and Classification
</td></tr><tr><td>9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7</td><td>Faical Expression Recognition by Combining
<br/>Texture and Geometrical Features
</td></tr><tr><td>9ea73660fccc4da51c7bc6eb6eedabcce7b5cead</td><td>Talking Head Detection by Likelihood-Ratio Test†
@@ -1209,7 +1286,11 @@
<br/>
</td></tr><tr><td>9e0285debd4b0ba7769b389181bd3e0fd7a02af6</td><td>From face images and attributes to attributes
<br/>Computer Vision Laboratory, ETH Zurich, Switzerland
-</td></tr><tr><td>9e5c2d85a1caed701b68ddf6f239f3ff941bb707</td><td></td></tr><tr><td>04bb3fa0824d255b01e9db4946ead9f856cc0b59</td><td></td></tr><tr><td>04470861408d14cc860f24e73d93b3bb476492d0</td><td></td></tr><tr><td>0447bdb71490c24dd9c865e187824dee5813a676</td><td>Manifold Estimation in View-based Feature
+</td></tr><tr><td>9e5c2d85a1caed701b68ddf6f239f3ff941bb707</td><td></td></tr><tr><td>04bb3fa0824d255b01e9db4946ead9f856cc0b59</td><td></td></tr><tr><td>040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large-scale Bisample Learning on ID vs. Spot Face Recognition
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>04470861408d14cc860f24e73d93b3bb476492d0</td><td></td></tr><tr><td>0447bdb71490c24dd9c865e187824dee5813a676</td><td>Manifold Estimation in View-based Feature
<br/>Space for Face Synthesis Across Pose
<br/>Paper 27
</td></tr><tr><td>044ba70e6744e80c6a09fa63ed6822ae241386f2</td><td>TO APPEAR IN AUTONOMOUS ROBOTS, SPECIAL ISSUE IN LEARNING FOR HUMAN-ROBOT COLLABORATION
@@ -1231,10 +1312,18 @@
</td></tr><tr><td>04250e037dce3a438d8f49a4400566457190f4e2</td><td></td></tr><tr><td>0431e8a01bae556c0d8b2b431e334f7395dd803a</td><td>Learning Localized Perceptual Similarity Metrics for Interactive Categorization
<br/>Google Inc.
<br/>google.com
+</td></tr><tr><td>04b4c779b43b830220bf938223f685d1057368e9</td><td>Video retrieval based on deep convolutional
+<br/>neural network
+<br/>Yajiao Dong
+<br/>School of Information and Electronics,
+<br/>Beijing Institution of Technology, Beijing, China
+<br/>Jianguo Li
+<br/>School of Information and Electronics,
+<br/>Beijing Institution of Technology, Beijing, China
</td></tr><tr><td>04616814f1aabe3799f8ab67101fbaf9fd115ae4</td><td><b>UNIVERSIT´EDECAENBASSENORMANDIEU.F.R.deSciences´ECOLEDOCTORALESIMEMTH`ESEPr´esent´eeparM.GauravSHARMAsoutenuele17D´ecembre2012envuedel’obtentionduDOCTORATdel’UNIVERSIT´EdeCAENSp´ecialit´e:InformatiqueetapplicationsArrˆet´edu07aoˆut2006Titre:DescriptionS´emantiquedesHumainsPr´esentsdansdesImagesVid´eo(SemanticDescriptionofHumansinImages)TheworkpresentedinthisthesiswascarriedoutatGREYC-UniversityofCaenandLEAR–INRIAGrenobleJuryM.PatrickPEREZDirecteurdeRechercheINRIA/Technicolor,RennesRapporteurM.FlorentPERRONNINPrincipalScientistXeroxRCE,GrenobleRapporteurM.JeanPONCEProfesseurdesUniversit´esENS,ParisExaminateurMme.CordeliaSCHMIDDirectricedeRechercheINRIA,GrenobleDirectricedeth`eseM.Fr´ed´ericJURIEProfesseurdesUniversit´esUniversit´edeCaenDirecteurdeth`ese</b></td></tr><tr><td>6a3a07deadcaaab42a0689fbe5879b5dfc3ede52</td><td>Learning to Estimate Pose by Watching Videos
<br/>Department of Computer Science and Engineering
<br/>IIT Kanpur
-</td></tr><tr><td>6a184f111d26787703f05ce1507eef5705fdda83</td><td></td></tr><tr><td>6a16b91b2db0a3164f62bfd956530a4206b23fea</td><td>A Method for Real-Time Eye Blink Detection and Its Application
+</td></tr><tr><td>6ad107c08ac018bfc6ab31ec92c8a4b234f67d49</td><td></td></tr><tr><td>6a184f111d26787703f05ce1507eef5705fdda83</td><td></td></tr><tr><td>6a16b91b2db0a3164f62bfd956530a4206b23fea</td><td>A Method for Real-Time Eye Blink Detection and Its Application
<br/>Mahidol Wittayanusorn School
<br/>Puttamonton, Nakornpatom 73170, Thailand
</td></tr><tr><td>6a806978ca5cd593d0ccd8b3711b6ef2a163d810</td><td>Facial feature tracking for Emotional Dynamic
@@ -1353,7 +1442,9 @@
</td></tr><tr><td>32df63d395b5462a8a4a3c3574ae7916b0cd4d1d</td><td>978-1-4577-0539-7/11/$26.00 ©2011 IEEE
<br/>1489
<br/>ICASSP 2011
-</td></tr><tr><td>35308a3fd49d4f33bdbd35fefee39e39fe6b30b7</td><td></td></tr><tr><td>3538d2b5f7ab393387ce138611ffa325b6400774</td><td>A DSP-BASED APPROACH FOR THE IMPLEMENTATION OF FACE RECOGNITION
+</td></tr><tr><td>35308a3fd49d4f33bdbd35fefee39e39fe6b30b7</td><td></td></tr><tr><td>352d61eb66b053ae5689bd194840fd5d33f0e9c0</td><td>Analysis Dictionary Learning based
+<br/>Classification: Structure for Robustness
+</td></tr><tr><td>3538d2b5f7ab393387ce138611ffa325b6400774</td><td>A DSP-BASED APPROACH FOR THE IMPLEMENTATION OF FACE RECOGNITION
<br/>ALGORITHMS
<br/>A. U. Batur
<br/>B. E. Flinchbaugh
@@ -1372,6 +1463,11 @@
<br/>Unconstrained Still/Video-Based Face Verification with Deep
<br/>Convolutional Neural Networks
<br/>Received: date / Accepted: date
+</td></tr><tr><td>35b1c1f2851e9ac4381ef41b4d980f398f1aad68</td><td>Geometry Guided Convolutional Neural Networks for
+<br/>Self-Supervised Video Representation Learning
+</td></tr><tr><td>351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd</td><td>ActionSnapping: Motion-based Video
+<br/>Synchronization
+<br/>Disney Research
</td></tr><tr><td>35e4b6c20756cd6388a3c0012b58acee14ffa604</td><td>Gender Classification in Large Databases
<br/>E. Ram´on-Balmaseda, J. Lorenzo-Navarro, and M. Castrill´on-Santana (cid:63)
<br/>Universidad de Las Palmas de Gran Canaria
@@ -1399,6 +1495,8 @@
</td></tr><tr><td>353a89c277cca3e3e4e8c6a199ae3442cdad59b5</td><td></td></tr><tr><td>352110778d2cc2e7110f0bf773398812fd905eb1</td><td>TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, JUNE 2014
<br/>Matrix Completion for Weakly-supervised
<br/>Multi-label Image Classification
+</td></tr><tr><td>6964af90cf8ac336a2a55800d9c510eccc7ba8e1</td><td>Temporal Relational Reasoning in Videos
+<br/>MIT CSAIL
</td></tr><tr><td>697b0b9630213ca08a1ae1d459fabc13325bdcbb</td><td></td></tr><tr><td>69d29012d17cdf0a2e59546ccbbe46fa49afcd68</td><td>Subspace clustering of dimensionality-reduced data
<br/>ETH Zurich, Switzerland
</td></tr><tr><td>69de532d93ad8099f4d4902c4cad28db958adfea</td><td></td></tr><tr><td>69526cdf6abbfc4bcd39616acde544568326d856</td><td>636
@@ -1435,7 +1533,7 @@
<br/>M. Correa, J. Ruiz-del-Solar, S. Parra-Tsunekawa, R. Verschae
<br/>Department of Electrical Engineering, Universidad de Chile
<br/>Advanced Mining Technology Center, Universidad de Chile
-</td></tr><tr><td>3c03d95084ccbe7bf44b6d54151625c68f6e74d0</td><td></td></tr><tr><td>3ce2ecf3d6ace8d80303daf67345be6ec33b3a93</td><td></td></tr><tr><td>3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8</td><td>Measuring Gaze Orientation for Human-Robot
+</td></tr><tr><td>3c03d95084ccbe7bf44b6d54151625c68f6e74d0</td><td></td></tr><tr><td>3cd7b15f5647e650db66fbe2ce1852e00c05b2e4</td><td></td></tr><tr><td>3ce2ecf3d6ace8d80303daf67345be6ec33b3a93</td><td></td></tr><tr><td>3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8</td><td>Measuring Gaze Orientation for Human-Robot
<br/>Interaction
<br/>∗ CNRS; LAAS; 7 avenue du Colonel Roche, 31077 Toulouse Cedex, France
<br/>† Universit´e de Toulouse; UPS; LAAS-CNRS : F-31077 Toulouse, France
@@ -1471,7 +1569,9 @@
<br/>2 Visual features
<br/>We use some basic properties of facial features to initialize our algorithm : eyes
<br/>are dark and circular, mouth is an horizontal dark line with a specific color,...
-</td></tr><tr><td>3cb64217ca2127445270000141cfa2959c84d9e7</td><td></td></tr><tr><td>3cd5da596060819e2b156e8b3a28331ef633036b</td><td></td></tr><tr><td>3c8da376576938160cbed956ece838682fa50e9f</td><td>Chapter 4
+</td></tr><tr><td>3cb64217ca2127445270000141cfa2959c84d9e7</td><td></td></tr><tr><td>3cd5da596060819e2b156e8b3a28331ef633036b</td><td></td></tr><tr><td>3c56acaa819f4e2263638b67cea1ec37a226691d</td><td>Body Joint guided 3D Deep Convolutional
+<br/>Descriptors for Action Recognition
+</td></tr><tr><td>3c8da376576938160cbed956ece838682fa50e9f</td><td>Chapter 4
<br/>Aiding Face Recognition with
<br/>Social Context Association Rule
<br/>based Re-Ranking
@@ -1517,16 +1617,31 @@
</td></tr><tr><td>566038a3c2867894a08125efe41ef0a40824a090</td><td>978-1-4244-2354-5/09/$25.00 ©2009 IEEE
<br/>1945
<br/>ICASSP 2009
+</td></tr><tr><td>56dca23481de9119aa21f9044efd7db09f618704</td><td>Riemannian Dictionary Learning and Sparse
+<br/>Coding for Positive Definite Matrices
+</td></tr><tr><td>516a27d5dd06622f872f5ef334313350745eadc3</td><td>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>1
+<br/>Fine-Grained Facial Expression Analysis Us-
+<br/>ing Dimensional Emotion Model
+<br/>
</td></tr><tr><td>51c3050fb509ca685de3d9ac2e965f0de1fb21cc</td><td>Fantope Regularization in Metric Learning
<br/>Marc T. Law
<br/>Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France
</td></tr><tr><td>51c7c5dfda47647aef2797ac3103cf0e108fdfb4</td><td>CS 395T: Celebrity Look-Alikes ∗
</td></tr><tr><td>519f4eb5fe15a25a46f1a49e2632b12a3b18c94d</td><td>Non-Lambertian Reflectance Modeling and
<br/>Shape Recovery of Faces using Tensor Splines
-</td></tr><tr><td>51528cdce7a92835657c0a616c0806594de7513b</td><td></td></tr><tr><td>5157dde17a69f12c51186ffc20a0a6c6847f1a29</td><td>Evolutionary Cost-sensitive Extreme Learning
+</td></tr><tr><td>51528cdce7a92835657c0a616c0806594de7513b</td><td></td></tr><tr><td>5161e38e4ea716dcfb554ccb88901b3d97778f64</td><td>SSPP-DAN: DEEP DOMAIN ADAPTATION NETWORK FOR
+<br/>FACE RECOGNITION WITH SINGLE SAMPLE PER PERSON
+<br/>School of Computing, KAIST, Republic of Korea
+</td></tr><tr><td>51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>A Fast and Accurate System for Face Detection,
+<br/>Identification, and Verification
+</td></tr><tr><td>5157dde17a69f12c51186ffc20a0a6c6847f1a29</td><td>Evolutionary Cost-sensitive Extreme Learning
<br/>Machine
<br/>1
-</td></tr><tr><td>51dc127f29d1bb076d97f515dca4cc42dda3d25b</td><td></td></tr><tr><td>3db75962857a602cae65f60f202d311eb4627b41</td><td></td></tr><tr><td>3d36f941d8ec613bb25e80fb8f4c160c1a2848df</td><td>Out-of-sample generalizations for supervised
+</td></tr><tr><td>51dc127f29d1bb076d97f515dca4cc42dda3d25b</td><td></td></tr><tr><td>3daafe6389d877fe15d8823cdf5ac15fd919676f</td><td>Human Action Localization
+<br/>with Sparse Spatial Supervision
+</td></tr><tr><td>3db75962857a602cae65f60f202d311eb4627b41</td><td></td></tr><tr><td>3d36f941d8ec613bb25e80fb8f4c160c1a2848df</td><td>Out-of-sample generalizations for supervised
<br/>manifold learning for classification
</td></tr><tr><td>3d5a1be4c1595b4805a35414dfb55716e3bf80d8</td><td>Hidden Two-Stream Convolutional Networks for
<br/>Action Recognition
@@ -1539,7 +1654,7 @@
<br/>V.le delle Scienze, Ed. 6, 90128 Palermo, Italy,
<br/>DRAFT
<br/>To appear in ICIAP 2015
-</td></tr><tr><td>3dda181be266950ba1280b61eb63ac11777029f9</td><td></td></tr><tr><td>3dd906bc0947e56d2b7bf9530b11351bbdff2358</td><td></td></tr><tr><td>3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a</td><td>1892
+</td></tr><tr><td>3dda181be266950ba1280b61eb63ac11777029f9</td><td></td></tr><tr><td>3d6ee995bc2f3e0f217c053368df659a5d14d5b5</td><td></td></tr><tr><td>3dd906bc0947e56d2b7bf9530b11351bbdff2358</td><td></td></tr><tr><td>3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a</td><td>1892
<br/>Random Multispace Quantization as
<br/>an Analytic Mechanism for BioHashing
<br/>of Biometric and Random Identity Inputs
@@ -1557,6 +1672,15 @@
</td></tr><tr><td>58fa85ed57e661df93ca4cdb27d210afe5d2cdcd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
<br/>978-1-5090-4847-2/16/$31.00 ©2016 IEEE
<br/>4118
+</td></tr><tr><td>58bf72750a8f5100e0c01e55fd1b959b31e7dbce</td><td>PyramidBox: A Context-assisted Single Shot
+<br/>Face Detector.
+<br/>Baidu Inc.
+</td></tr><tr><td>58542eeef9317ffab9b155579256d11efb4610f2</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>Face Recognition Revisited on Pose, Alignment,
+<br/>Color, Illumination and Expression-PyTen
+<br/>Computer Science, BIT Noida, India
</td></tr><tr><td>58823377757e7dc92f3b70a973be697651089756</td><td>Technical Report
<br/>UCAM-CL-TR-861
<br/>ISSN 1476-2986
@@ -1572,7 +1696,14 @@
</td></tr><tr><td>58bb77dff5f6ee0fb5ab7f5079a5e788276184cc</td><td>Facial Expression Recognition with PCA and LBP
<br/>Features Extracting from Active Facial Patches
<br/>
-</td></tr><tr><td>58cb1414095f5eb6a8c6843326a6653403a0ee17</td><td></td></tr><tr><td>677477e6d2ba5b99633aee3d60e77026fb0b9306</td><td></td></tr><tr><td>6742c0a26315d7354ab6b1fa62a5fffaea06da14</td><td>BAS AND SMITH: WHAT DOES 2D GEOMETRIC INFORMATION REALLY TELL US ABOUT 3D FACE SHAPE?
+</td></tr><tr><td>58cb1414095f5eb6a8c6843326a6653403a0ee17</td><td></td></tr><tr><td>677585ccf8619ec2330b7f2d2b589a37146ffad7</td><td>A flexible model for training action localization
+<br/>with varying levels of supervision
+</td></tr><tr><td>677477e6d2ba5b99633aee3d60e77026fb0b9306</td><td></td></tr><tr><td>6789bddbabf234f31df992a3356b36a47451efc7</td><td>Unsupervised Generation of Free-Form and
+<br/>Parameterized Avatars
+</td></tr><tr><td>675b2caee111cb6aa7404b4d6aa371314bf0e647</td><td>AVA: A Video Dataset of Spatio-temporally Localized Atomic Visual Actions
+<br/>Carl Vondrick∗
+</td></tr><tr><td>679b72d23a9cfca8a7fe14f1d488363f2139265f</td><td></td></tr><tr><td>67484723e0c2cbeb936b2e863710385bdc7d5368</td><td>Anchor Cascade for Efficient Face Detection
+</td></tr><tr><td>6742c0a26315d7354ab6b1fa62a5fffaea06da14</td><td>BAS AND SMITH: WHAT DOES 2D GEOMETRIC INFORMATION REALLY TELL US ABOUT 3D FACE SHAPE?
<br/>What does 2D geometric information
<br/>really tell us about 3D face shape?
</td></tr><tr><td>67a50752358d5d287c2b55e7a45cc39be47bf7d0</td><td></td></tr><tr><td>67ba3524e135c1375c74fe53ebb03684754aae56</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
@@ -1580,6 +1711,20 @@
<br/>ICASSP 2017
</td></tr><tr><td>6769cfbd85329e4815bb1332b118b01119975a95</td><td>Tied factor analysis for face recognition across
<br/>large pose changes
+</td></tr><tr><td>0be43cf4299ce2067a0435798ef4ca2fbd255901</td><td>Title
+<br/>A temporal latent topic model for facial expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>The 10th Asian Conference on Computer Vision (ACCV 2010),
+<br/>Queenstown, New Zealand, 8-12 November 2010. In Lecture
+<br/>Notes in Computer Science, 2010, v. 6495, p. 51-63
+<br/>Issued Date
+<br/>2011
+<br/>URL
+<br/>http://hdl.handle.net/10722/142604
+<br/>Rights
+<br/>Creative Commons: Attribution 3.0 Hong Kong License
</td></tr><tr><td>0b2277a0609565c30a8ee3e7e193ce7f79ab48b0</td><td>944
<br/>Cost-Sensitive Semi-Supervised Discriminant
<br/>Analysis for Face Recognition
@@ -1601,10 +1746,14 @@
<br/>April 13, 2015
</td></tr><tr><td>0b20f75dbb0823766d8c7b04030670ef7147ccdd</td><td>1
<br/>Feature selection using nearest attributes
+</td></tr><tr><td>0b5a82f8c0ee3640503ba24ef73e672d93aeebbf</td><td>On Learning 3D Face Morphable Model
+<br/>from In-the-wild Images
</td></tr><tr><td>0b174d4a67805b8796bfe86cd69a967d357ba9b6</td><td> Research Journal of Recent Sciences _________________________________________________ ISSN 2277-2502
<br/> Vol. 3(4), 56-62, April (2014)
<br/>Res.J.Recent Sci.
-</td></tr><tr><td>0ba449e312894bca0d16348f3aef41ca01872383</td><td></td></tr><tr><td>0ba99a709cd34654ac296418a4f41a9543928149</td><td></td></tr><tr><td>0b8c92463f8f5087696681fb62dad003c308ebe2</td><td>On Matching Sketches with Digital Face Images
+</td></tr><tr><td>0ba449e312894bca0d16348f3aef41ca01872383</td><td></td></tr><tr><td>0b572a2b7052b15c8599dbb17d59ff4f02838ff7</td><td>Automatic Subspace Learning via Principal
+<br/>Coefficients Embedding
+</td></tr><tr><td>0ba99a709cd34654ac296418a4f41a9543928149</td><td></td></tr><tr><td>0b8c92463f8f5087696681fb62dad003c308ebe2</td><td>On Matching Sketches with Digital Face Images
<br/>in local
</td></tr><tr><td>0bc0f9178999e5c2f23a45325fa50300961e0226</td><td>Recognizing facial expressions from videos using Deep
<br/>Belief Networks
@@ -1621,32 +1770,45 @@
<br/>477
<br/>Learning From Examples in the Small Sample Case:
<br/>Face Expression Recognition
+</td></tr><tr><td>944faf7f14f1bead911aeec30cc80c861442b610</td><td>Action Tubelet Detector for Spatio-Temporal Action Localization
</td></tr><tr><td>9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73</td><td>5967
<br/>A Benchmark and Comparative Study of
<br/>Video-Based Face Recognition
<br/>on COX Face Database
-</td></tr><tr><td>94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81</td><td></td></tr><tr><td>9441253b638373a0027a5b4324b4ee5f0dffd670</td><td>A Novel Scheme for Generating Secure Face
+</td></tr><tr><td>94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81</td><td></td></tr><tr><td>94325522c9be8224970f810554611d6a73877c13</td><td></td></tr><tr><td>9441253b638373a0027a5b4324b4ee5f0dffd670</td><td>A Novel Scheme for Generating Secure Face
<br/>Templates Using BDA
<br/>P.G. Student, Department of Computer Engineering,
<br/>Associate Professor, Department of Computer
<br/>MCERC,
<br/>Nashik (M.S.), India
-</td></tr><tr><td>94ac3008bf6be6be6b0f5140a0bea738d4c75579</td><td></td></tr><tr><td>0e8760fc198a7e7c9f4193478c0e0700950a86cd</td><td></td></tr><tr><td>0e50fe28229fea45527000b876eb4068abd6ed8c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+</td></tr><tr><td>94ac3008bf6be6be6b0f5140a0bea738d4c75579</td><td></td></tr><tr><td>94a11b601af77f0ad46338afd0fa4ccbab909e82</td><td></td></tr><tr><td>0e8760fc198a7e7c9f4193478c0e0700950a86cd</td><td></td></tr><tr><td>0e50fe28229fea45527000b876eb4068abd6ed8c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
<br/>2936
</td></tr><tr><td>0eff410cd6a93d0e37048e236f62e209bc4383d1</td><td>Anchorage Convention District
<br/>May 3-8, 2010, Anchorage, Alaska, USA
<br/>978-1-4244-5040-4/10/$26.00 ©2010 IEEE
<br/>4803
+</td></tr><tr><td>0ee737085af468f264f57f052ea9b9b1f58d7222</td><td>SiGAN: Siamese Generative Adversarial Network
+<br/>for Identity-Preserving Face Hallucination
</td></tr><tr><td>0ee661a1b6bbfadb5a482ec643573de53a9adf5e</td><td>JOURNAL OF LATEX CLASS FILES, VOL. X, NO. X, MONTH YEAR
<br/>On the Use of Discriminative Cohort Score
<br/>Normalization for Unconstrained Face Recognition
-</td></tr><tr><td>0e3840ea3227851aaf4633133dd3cbf9bbe89e5b</td><td></td></tr><tr><td>0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a</td><td></td></tr><tr><td>0e7c70321462694757511a1776f53d629a1b38f3</td><td>NIST Special Publication 1136
+</td></tr><tr><td>0e3840ea3227851aaf4633133dd3cbf9bbe89e5b</td><td></td></tr><tr><td>0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a</td><td></td></tr><tr><td>0e2ea7af369dbcaeb5e334b02dd9ba5271b10265</td><td></td></tr><tr><td>0e7c70321462694757511a1776f53d629a1b38f3</td><td>NIST Special Publication 1136
<br/>2012 Proceedings of the
<br/>Performance Metrics for Intelligent
<br/>Systems (PerMI ‘12) Workshop
<br/>
<br/>http://dx.doi.org/10.6028/NIST.SP.1136
-</td></tr><tr><td>6080f26675e44f692dd722b61905af71c5260af8</td><td></td></tr><tr><td>60d765f2c0a1a674b68bee845f6c02741a49b44e</td><td></td></tr><tr><td>60ce4a9602c27ad17a1366165033fe5e0cf68078</td><td>TECHNICAL NOTE
+</td></tr><tr><td>6080f26675e44f692dd722b61905af71c5260af8</td><td></td></tr><tr><td>60d765f2c0a1a674b68bee845f6c02741a49b44e</td><td></td></tr><tr><td>60c24e44fce158c217d25c1bae9f880a8bd19fc3</td><td>Controllable Image-to-Video Translation:
+<br/>A Case Study on Facial Expression Generation
+<br/>MIT CSAIL
+<br/>Wenbing Huang
+<br/>Tencent AI Lab
+<br/>MIT-Waston Lab
+<br/>Tencent AI Lab
+<br/>Tencent AI Lab
+</td></tr><tr><td>60e2b9b2e0db3089237d0208f57b22a3aac932c1</td><td>Frankenstein: Learning Deep Face Representations
+<br/>using Small Data
+</td></tr><tr><td>60ce4a9602c27ad17a1366165033fe5e0cf68078</td><td>TECHNICAL NOTE
<br/>DIGITAL & MULTIMEDIA SCIENCES
<br/>J Forensic Sci, 2015
<br/>doi: 10.1111/1556-4029.12800
@@ -1674,13 +1836,20 @@
</td></tr><tr><td>60b3601d70f5cdcfef9934b24bcb3cc4dde663e7</td><td>SUBMITTED TO IEEE TRANS. ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
<br/>Binary Gradient Correlation Patterns
<br/>for Robust Face Recognition
-</td></tr><tr><td>34a41ec648d082270697b9ee264f0baf4ffb5c8d</td><td></td></tr><tr><td>34b7e826db49a16773e8747bc8dfa48e344e425d</td><td></td></tr><tr><td>341ed69a6e5d7a89ff897c72c1456f50cfb23c96</td><td>DAGER: Deep Age, Gender and Emotion
+</td></tr><tr><td>34a41ec648d082270697b9ee264f0baf4ffb5c8d</td><td></td></tr><tr><td>341002fac5ae6c193b78018a164d3c7295a495e4</td><td>von Mises-Fisher Mixture Model-based Deep
+<br/>learning: Application to Face Verification
+</td></tr><tr><td>34ec83c8ff214128e7a4a4763059eebac59268a6</td><td>Action Anticipation By Predicting Future
+<br/>Dynamic Images
+<br/>Australian Centre for Robotic Vision, ANU, Canberra, Australia
+</td></tr><tr><td>34b7e826db49a16773e8747bc8dfa48e344e425d</td><td></td></tr><tr><td>341ed69a6e5d7a89ff897c72c1456f50cfb23c96</td><td>DAGER: Deep Age, Gender and Emotion
<br/>Recognition Using Convolutional Neural
<br/>Networks
<br/>Computer Vision Lab, Sighthound Inc., Winter Park, FL
</td></tr><tr><td>340d1a9852747b03061e5358a8d12055136599b0</td><td>Audio-Visual Recognition System Insusceptible
<br/>to Illumination Variation over Internet Protocol
<br/>
+</td></tr><tr><td>5a3da29970d0c3c75ef4cb372b336fc8b10381d7</td><td>CNN-based Real-time Dense Face Reconstruction
+<br/>with Inverse-rendered Photo-realistic Face Images
</td></tr><tr><td>5a34a9bb264a2594c02b5f46b038aa1ec3389072</td><td>Label-Embedding for Image Classification
</td></tr><tr><td>5a4c6246758c522f68e75491eb65eafda375b701</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
<br/>1118
@@ -1688,13 +1857,28 @@
</td></tr><tr><td>5aad5e7390211267f3511ffa75c69febe3b84cc7</td><td>Driver Gaze Estimation
<br/>Without Using Eye Movement
<br/>MIT AgeLab
-</td></tr><tr><td>5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372</td><td></td></tr><tr><td>5a7520380d9960ff3b4f5f0fe526a00f63791e99</td><td>The Indian Spontaneous Expression
+</td></tr><tr><td>5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372</td><td></td></tr><tr><td>5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 12/17/2017 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>AutomaticageandgenderclassificationusingsupervisedappearancemodelAliMainaBukarHassanUgailDavidConnahAliMainaBukar,HassanUgail,DavidConnah,“Automaticageandgenderclassificationusingsupervisedappearancemodel,”J.Electron.Imaging25(6),061605(2016),doi:10.1117/1.JEI.25.6.061605. </td></tr><tr><td>5a7520380d9960ff3b4f5f0fe526a00f63791e99</td><td>The Indian Spontaneous Expression
<br/>Database for Emotion Recognition
+</td></tr><tr><td>5fff61302adc65d554d5db3722b8a604e62a8377</td><td>Additive Margin Softmax for Face Verification
+<br/>UESTC
+<br/>Georgia Tech
+<br/>UESTC
+<br/>UESTC
+</td></tr><tr><td>5fa6e4a23da0b39e4b35ac73a15d55cee8608736</td><td>IJCV special issue (Best papers of ECCV 2016) manuscript No.
+<br/>(will be inserted by the editor)
+<br/>RED-Net:
+<br/>A Recurrent Encoder-Decoder Network for Video-based Face Alignment
+<br/>Submitted: April 19 2017 / Revised: December 12 2017
</td></tr><tr><td>5f871838710a6b408cf647aacb3b198983719c31</td><td>1716
<br/>Locally Linear Regression for Pose-Invariant
<br/>Face Recognition
</td></tr><tr><td>5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9</td><td></td></tr><tr><td>5f344a4ef7edfd87c5c4bc531833774c3ed23542</td><td>c
-</td></tr><tr><td>5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a</td><td></td></tr><tr><td>5fa932be4d30cad13ea3f3e863572372b915bec8</td><td></td></tr><tr><td>5f5906168235613c81ad2129e2431a0e5ef2b6e4</td><td>Noname manuscript No.
+</td></tr><tr><td>5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a</td><td></td></tr><tr><td>5f27ed82c52339124aa368507d66b71d96862cb7</td><td>Semi-supervised Learning of Classifiers: Theory, Algorithms
+<br/>and Their Application to Human-Computer Interaction
+<br/>This work has been partially funded by NSF Grant IIS 00-85980.
+<br/>DRAFT
+</td></tr><tr><td>5fa932be4d30cad13ea3f3e863572372b915bec8</td><td></td></tr><tr><td>5f5906168235613c81ad2129e2431a0e5ef2b6e4</td><td>Noname manuscript No.
<br/>(will be inserted by the editor)
<br/>A Unified Framework for Compositional Fitting of
<br/>Active Appearance Models
@@ -1731,7 +1915,9 @@
<br/>A Face and Palmprint Recognition Approach Based
<br/>on Discriminant DCT Feature Extraction
</td></tr><tr><td>339937141ffb547af8e746718fbf2365cc1570c8</td><td>Facial Emotion Recognition in Real Time
-</td></tr><tr><td>33ae696546eed070717192d393f75a1583cd8e2c</td><td></td></tr><tr><td>334d6c71b6bce8dfbd376c4203004bd4464c2099</td><td>BICONVEX RELAXATION FOR SEMIDEFINITE PROGRAMMING IN
+</td></tr><tr><td>33aa980544a9d627f305540059828597354b076c</td><td></td></tr><tr><td>33ae696546eed070717192d393f75a1583cd8e2c</td><td></td></tr><tr><td>3352426a67eabe3516812cb66a77aeb8b4df4d1b</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 4, NO. 5, APRIL 2015
+<br/>Joint Multi-view Face Alignment in the Wild
+</td></tr><tr><td>334d6c71b6bce8dfbd376c4203004bd4464c2099</td><td>BICONVEX RELAXATION FOR SEMIDEFINITE PROGRAMMING IN
<br/>COMPUTER VISION
</td></tr><tr><td>33e20449aa40488c6d4b430a48edf5c4b43afdab</td><td>TRANSACTIONS ON AFFECTIVE COMPUTING
<br/>The Faces of Engagement: Automatic
@@ -1806,7 +1992,7 @@
</td></tr><tr><td>05f4d907ee2102d4c63a3dc337db7244c570d067</td><td></td></tr><tr><td>05a7be10fa9af8fb33ae2b5b72d108415519a698</td><td>Multilayer and Multimodal Fusion of Deep Neural Networks
<br/>for Video Classification
<br/>NVIDIA
-</td></tr><tr><td>0580edbd7865414c62a36da9504d1169dea78d6f</td><td>Baseline CNN structure analysis for facial expression recognition
+</td></tr><tr><td>050a149051a5d268fcc5539e8b654c2240070c82</td><td>MAGISTERSKÉ A DOKTORSKÉSTUDIJNÍ PROGRAMY31. 5. 2018SBORNÍKSTUDENTSKÁ VĚDECKÁ KONFERENCE </td></tr><tr><td>0580edbd7865414c62a36da9504d1169dea78d6f</td><td>Baseline CNN structure analysis for facial expression recognition
</td></tr><tr><td>05e96d76ed4a044d8e54ef44dac004f796572f1a</td><td></td></tr><tr><td>9d839dfc9b6a274e7c193039dfa7166d3c07040b</td><td>Augmented Faces
<br/>1ETH Z¨urich
<br/>2Kooaba AG
@@ -1818,7 +2004,35 @@
<br/>video
<br/>(Eigen
<br/>passport-verification,
-</td></tr><tr><td>9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1</td><td></td></tr><tr><td>02601d184d79742c7cd0c0ed80e846d95def052e</td><td>Graphical Representation for Heterogeneous
+</td></tr><tr><td>9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1</td><td></td></tr><tr><td>9ca7899338129f4ba6744f801e722d53a44e4622</td><td>Deep Neural Networks Regularization for Structured
+<br/>Output Prediction
+<br/>Soufiane Belharbi∗
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+</td></tr><tr><td>9c1664f69d0d832e05759e8f2f001774fad354d6</td><td>Action representations in robotics: A
+<br/>taxonomy and systematic classification
+<br/>Journal Title
+<br/>XX(X):1–32
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td></tr><tr><td>9c065dfb26ce280610a492c887b7f6beccf27319</td><td>Learning from Video and Text via Large-Scale Discriminative Clustering
+<br/>1 ´Ecole Normale Sup´erieure
+<br/>2Inria
+<br/>3CIIRC
+</td></tr><tr><td>02601d184d79742c7cd0c0ed80e846d95def052e</td><td>Graphical Representation for Heterogeneous
<br/>Face Recognition
</td></tr><tr><td>02cc96ad997102b7c55e177ac876db3b91b4e72c</td><td>MuseumVisitors: a dataset for pedestrian and group detection, gaze estimation
<br/>and behavior understanding
@@ -1944,6 +2158,10 @@
<br/>ASL4GUP 2017
<br/>Held in conjunction with IEEE FG 2017, in May 30, 2017,
<br/>Washington DC, USA
+</td></tr><tr><td>a3d8b5622c4b9af1f753aade57e4774730787a00</td><td>Pose-Aware Person Recognition
+<br/>Anoop Namboodiri (cid:63)
+<br/>(cid:63) CVIT, IIIT Hyderabad, India
+<br/>† Facebook AI Research
</td></tr><tr><td>a3017bb14a507abcf8446b56243cfddd6cdb542b</td><td>Face Localization and Recognition in Varied
<br/>Expressions and Illumination
<br/>Hui-Yu Huang, Shih-Hang Hsu
@@ -1956,11 +2174,16 @@
<br/>Face++, Megvii Inc.
<br/>Face++, Megvii Inc.
<br/>Face++, Megvii Inc.
+</td></tr><tr><td>a3f69a073dcfb6da8038607a9f14eb28b5dab2db</td><td>Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+<br/>1184
+</td></tr><tr><td>a3f78cc944ac189632f25925ba807a0e0678c4d5</td><td>Action Recognition in Realistic Sports Videos
</td></tr><tr><td>a33f20773b46283ea72412f9b4473a8f8ad751ae</td><td></td></tr><tr><td>a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7</td><td>Multiple Local Curvature Gabor Binary
<br/>Patterns for Facial Action Recognition
<br/>Signal Processing Laboratory (LTS5),
<br/>´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
-</td></tr><tr><td>a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9</td><td>Recognizing Violence in Movies
+</td></tr><tr><td>a32c5138c6a0b3d3aff69bcab1015d8b043c91fb</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/19/2018
+<br/>Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>Videoredaction:asurveyandcomparisonofenablingtechnologiesShaganSahAmeyaShringiRaymondPtuchaAaronBurryRobertLoceShaganSah,AmeyaShringi,RaymondPtucha,AaronBurry,RobertLoce,“Videoredaction:asurveyandcomparisonofenablingtechnologies,”J.Electron.Imaging26(5),051406(2017),doi:10.1117/1.JEI.26.5.051406. </td></tr><tr><td>a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9</td><td>Recognizing Violence in Movies
<br/>CIS400/401 Project Final Report
<br/>Univ. of Pennsylvania
<br/>Philadelphia, PA
@@ -2022,13 +2245,16 @@
<br/>IEEE SIGNAL PROCESSING MAGAZINE
<br/>1053-5888/04/$20.00©2004IEEE
<br/>MARCH 2004
+</td></tr><tr><td>b558be7e182809f5404ea0fcf8a1d1d9498dc01a</td><td>Bottom-up and top-down reasoning with convolutional latent-variable models
+<br/>UC Irvine
+<br/>UC Irvine
</td></tr><tr><td>b5fc4f9ad751c3784eaf740880a1db14843a85ba</td><td>SIViP (2007) 1:225–237
<br/>DOI 10.1007/s11760-007-0016-5
<br/>ORIGINAL PAPER
<br/>Significance of image representation for face verification
<br/>Received: 29 August 2006 / Revised: 28 March 2007 / Accepted: 28 March 2007 / Published online: 1 May 2007
<br/>© Springer-Verlag London Limited 2007
-</td></tr><tr><td>b5160e95192340c848370f5092602cad8a4050cd</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, TO APPEAR
+</td></tr><tr><td>b562def2624f59f7d3824e43ecffc990ad780898</td><td></td></tr><tr><td>b5160e95192340c848370f5092602cad8a4050cd</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, TO APPEAR
<br/>Video Classification With CNNs: Using The Codec
<br/>As A Spatio-Temporal Activity Sensor
</td></tr><tr><td>b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad</td><td>Journal of Applied Research and
@@ -2056,7 +2282,7 @@
</td></tr><tr><td>b5857b5bd6cb72508a166304f909ddc94afe53e3</td><td>SSIG and IRISA at Multimodal Person Discovery
<br/>1Department of Computer Science, Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
<br/>2IRISA & Inria Rennes , CNRS, Rennes, France
-</td></tr><tr><td>b51e3d59d1bcbc023f39cec233f38510819a2cf9</td><td>CBMM Memo No. 003
+</td></tr><tr><td>b59f441234d2d8f1765a20715e227376c7251cd7</td><td></td></tr><tr><td>b51e3d59d1bcbc023f39cec233f38510819a2cf9</td><td>CBMM Memo No. 003
<br/>March 27, 2014
<br/>Can a biologically-plausible hierarchy effectively
<br/>replace face detection, alignment, and
@@ -2068,6 +2294,10 @@
<br/>using Partial Observations
<br/>Snap Research
<br/>Microsoft Research
+</td></tr><tr><td>b2b535118c5c4dfcc96f547274cdc05dde629976</td><td>JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+<br/>Automatic Recognition of Facial Displays of
+<br/>Unfelt Emotions
+<br/>Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
</td></tr><tr><td>b235b4ccd01a204b95f7408bed7a10e080623d2e</td><td>Regularizing Flat Latent Variables with Hierarchical Structures
</td></tr><tr><td>b2c25af8a8e191c000f6a55d5f85cf60794c2709</td><td>Noname manuscript No.
<br/>(will be inserted by the editor)
@@ -2075,15 +2305,29 @@
<br/>Kernel Optimization Through Graph Embedding
<br/>N. Vretos, A. Tefas and I. Pitas
<br/>the date of receipt and acceptance should be inserted later
+</td></tr><tr><td>d904f945c1506e7b51b19c99c632ef13f340ef4c</td><td>A scalable 3D HOG model for fast object detection and viewpoint estimation
+<br/>KU Leuven, ESAT/PSI - iMinds
+<br/>Kasteelpark Arenberg 10 B-3001 Leuven, Belgium
</td></tr><tr><td>d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
<br/>3031
<br/>ICASSP 2017
</td></tr><tr><td>d9739d1b4478b0bf379fe755b3ce5abd8c668f89</td><td></td></tr><tr><td>d9318c7259e394b3060b424eb6feca0f71219179</td><td>406
<br/>Face Matching and Retrieval Using Soft Biometrics
-</td></tr><tr><td>d9a1dd762383213741de4c1c1fd9fccf44e6480d</td><td></td></tr><tr><td>ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6</td><td>779
+</td></tr><tr><td>d9a1dd762383213741de4c1c1fd9fccf44e6480d</td><td></td></tr><tr><td>d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c</td><td>Learning Inference Models for Computer Vision
+</td></tr><tr><td>aca232de87c4c61537c730ee59a8f7ebf5ecb14f</td><td>EBGM VS SUBSPACE PROJECTION FOR FACE RECOGNITION
+<br/>19.5 Km Markopoulou Avenue, P.O. Box 68, Peania, Athens, Greece
+<br/>Athens Information Technology
+<br/>Keywords:
+<br/>Human-Machine Interfaces, Computer Vision, Face Recognition.
+</td></tr><tr><td>ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6</td><td>779
<br/>Privacy-Protected Facial Biometric Verification
<br/>Using Fuzzy Forest Learning
-</td></tr><tr><td>aca273a9350b10b6e2ef84f0e3a327255207d0f5</td><td></td></tr><tr><td>ac820d67b313c38b9add05abef8891426edd5afb</td><td></td></tr><tr><td>acb83d68345fe9a6eb9840c6e1ff0e41fa373229</td><td>Kernel Methods in Computer Vision:
+</td></tr><tr><td>aca273a9350b10b6e2ef84f0e3a327255207d0f5</td><td></td></tr><tr><td>ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e</td><td></td></tr><tr><td>ac820d67b313c38b9add05abef8891426edd5afb</td><td></td></tr><tr><td>ac26166857e55fd5c64ae7194a169ff4e473eb8b</td><td>Personalized Age Progression with Bi-level
+<br/>Aging Dictionary Learning
+</td></tr><tr><td>ac8441e30833a8e2a96a57c5e6fede5df81794af</td><td>IEEE TRANSACTIONS ON IMAGE PROCESSING
+<br/>Hierarchical Representation Learning for Kinship
+<br/>Verification
+</td></tr><tr><td>acb83d68345fe9a6eb9840c6e1ff0e41fa373229</td><td>Kernel Methods in Computer Vision:
<br/>Object Localization, Clustering,
<br/>and Taxonomy Discovery
<br/>vorgelegt von
@@ -2121,7 +2365,73 @@
<br/>Submitted for the degree of Doctor of Philosophy
<br/>Department of Computer Science
<br/>20th February 2007
-</td></tr><tr><td>ad6745dd793073f81abd1f3246ba4102046da022</td><td></td></tr><tr><td>bba281fe9c309afe4e5cc7d61d7cff1413b29558</td><td>Social Cognitive and Affective Neuroscience, 2017, 984–992
+</td></tr><tr><td>ad6745dd793073f81abd1f3246ba4102046da022</td><td></td></tr><tr><td>adf62dfa00748381ac21634ae97710bb80fc2922</td><td>ViFaI: A trained video face indexing scheme
+<br/>1. Introduction
+<br/>With the increasing prominence of inexpensive
+<br/>video recording devices (e.g., digital camcorders and
+<br/>video recording smartphones),
+<br/>the average user’s
+<br/>video collection today is increasing rapidly. With this
+<br/>development, there arises a natural desire to rapidly
+<br/>access a subset of one’s collection of videos. The solu-
+<br/>tion to this problem requires an effective video index-
+<br/>ing scheme. In particular, we must be able to easily
+<br/>process a video to extract such indexes.
+<br/>Today, there also exist large sets of labeled (tagged)
+<br/>face images. One important example is an individual’s
+<br/>Facebook profile. Such a set of of tagged images of
+<br/>one’s self, family, friends, and colleagues represents
+<br/>an extremely valuable potential training set.
+<br/>In this work, we explore how to leverage the afore-
+<br/>mentioned training set to solve the video indexing
+<br/>problem.
+<br/>2. Problem Statement
+<br/>Use a labeled (tagged) training set of face images
+<br/>to extract relevant indexes from a collection of videos,
+<br/>and use these indexes to answer boolean queries of the
+<br/>form: “videos with ‘Person 1’ OP1 ‘Person 2’ OP2 ...
+<br/>OP(N-1) ‘Person N’ ”, where ‘Person N’ corresponds
+<br/>to a training label (tag) and OPN is a boolean operand
+<br/>such as AND, OR, NOT, XOR, and so on.
+<br/>3. Proposed Scheme
+<br/>In this section, we outline our proposed scheme to
+<br/>address the problem we postulate in the previous sec-
+<br/>tion. We provide further details about the system im-
+<br/>plementation in Section 4.
+<br/>At a high level, we subdivide the problem into two
+<br/>key phases: the first ”off-line” executed once, and the
+<br/>second ”on-line” phase instantiated upon each query.
+<br/>For the purposes of this work, we define an index as
+<br/>follows: <video id, tag, frame #>.
+<br/>3.1. The training phase
+<br/>We first outline Phase 1 (the training or “off-line”
+<br/>phase):
+<br/>1. Use the labeled training set plus an additional set
+<br/>of ‘other’ faces to compute the Fisher Linear Dis-
+<br/>criminant (FLD) [1].
+<br/>2. Project the training data onto the space defined by
+<br/>the eigenvectors returned by the FLD, and train
+<br/>a classifier (first nearest neighbour, then SVM if
+<br/>required) using the training features.
+<br/>3. Iterate through each frame of each video, detect-
+<br/>ing faces [2], classifying detected results, and add
+<br/>an index if the detected face corresponds to one of
+<br/>the labeled classes from the previous step.
+<br/>3.2. The query phase
+<br/>Now, we outline Phase 2 (the query or “on-line”
+<br/>phase):
+<br/>1. Key the indexes on their video id.
+<br/>2. For each video, evaluate the boolean query for the
+<br/>set of corresponding indexes.
+<br/>3. Keep videos for which the boolean query evalu-
+<br/>ates true, and discard those for which it evaluates
+<br/>false.
+<br/>4. Implementation Details
+<br/>We are implementing the project in C++, leverag-
+<br/>ing the OpenCV v2.2 framework [4]. In this section,
+<br/>we will highlight some of the critical implementation
+<br/>details of our proposed system.
+</td></tr><tr><td>bba281fe9c309afe4e5cc7d61d7cff1413b29558</td><td>Social Cognitive and Affective Neuroscience, 2017, 984–992
<br/>doi: 10.1093/scan/nsx030
<br/>Advance Access Publication Date: 11 April 2017
<br/>Original article
@@ -2142,7 +2452,9 @@
<br/>==OIEI 7IK=O = B=?E= ANFHAIIE ==OIEI IOIJA ?J=EI JDHAA IJ=CAI B=?A =?GKE
<br/>9DAJDAH KIEC *=OAIE= ?=IIEAH " & IKFFHJ LA?JH =?DEA 58  H AKH=
<br/>HACEI E = IECA ?=IIEAH EI = ? IJH=JACO & 0MALAH J = ?= HACEI
-</td></tr><tr><td>bbe1332b4d83986542f5db359aee1fd9b9ba9967</td><td></td></tr><tr><td>bbf01aa347982592b3e4c9e4f433e05d30e71305</td><td></td></tr><tr><td>bbf1396eb826b3826c5a800975047beabde2f0de</td><td></td></tr><tr><td>d73d2c9a6cef79052f9236e825058d5d9cdc1321</td><td>2014-ENST-0040
+</td></tr><tr><td>bbe1332b4d83986542f5db359aee1fd9b9ba9967</td><td></td></tr><tr><td>bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197</td><td>TO APPEAR IN TPAMI
+<br/>From Images to 3D Shape Attributes
+</td></tr><tr><td>bbf01aa347982592b3e4c9e4f433e05d30e71305</td><td></td></tr><tr><td>bbf1396eb826b3826c5a800975047beabde2f0de</td><td></td></tr><tr><td>bbd1eb87c0686fddb838421050007e934b2d74ab</td><td></td></tr><tr><td>d73d2c9a6cef79052f9236e825058d5d9cdc1321</td><td>2014-ENST-0040
<br/>EDITE - ED 130
<br/>Doctorat ParisTech
<br/>T H È S E
@@ -2259,11 +2571,36 @@
<br/>Hollywood Human Action: The Hollywood
<br/>dataset [3] contains 8 action classes collected from
<br/>32 Hollywood movies with a total of 430 videos.
+</td></tr><tr><td>d7b6bbb94ac20f5e75893f140ef7e207db7cd483</td><td>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+<br/>Face Recognition across Pose: A
+<br/>Review
+<br/>Author
+<br/>Zhang, Paul, Gao, Yongsheng
+<br/>Published
+<br/>2009
+<br/>Journal Title
+<br/>Pattern Recognition
+<br/>DOI
+<br/>https://doi.org/10.1016/j.patcog.2009.04.017
+<br/>Copyright Statement
+<br/>Copyright 2009 Elsevier. This is the author-manuscript version of this paper. Reproduced in accordance
+<br/>with the copyright policy of the publisher. Please refer to the journal's website for access to the
+<br/>definitive, published version.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/30193
</td></tr><tr><td>d78373de773c2271a10b89466fe1858c3cab677f</td><td></td></tr><tr><td>d03265ea9200a993af857b473c6bf12a095ca178</td><td>Multiple deep convolutional neural
<br/>networks averaging for face
<br/>alignment
<br/>Zhouping Yin
-<br/>Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 05/28/2015 Terms of Use: http://spiedl.org/terms </td></tr><tr><td>d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0</td><td></td></tr><tr><td>d03baf17dff5177d07d94f05f5791779adf3cd5f</td><td></td></tr><tr><td>d0a21f94de312a0ff31657fd103d6b29db823caa</td><td>Facial Expression Analysis
+</td></tr><tr><td>d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0</td><td></td></tr><tr><td>d03baf17dff5177d07d94f05f5791779adf3cd5f</td><td></td></tr><tr><td>d0144d76b8b926d22411d388e7a26506519372eb</td><td>Improving Regression Performance with Distributional Losses
+</td></tr><tr><td>d02e27e724f9b9592901ac1f45830341d37140fe</td><td>DA-GAN: Instance-level Image Translation by Deep Attention Generative
+<br/>Adversarial Networks
+<br/>The State University of New York at Buffalo
+<br/>The State University of New York at Buffalo
+<br/>Microsoft Research
+<br/>Microsoft Research
+</td></tr><tr><td>d0a21f94de312a0ff31657fd103d6b29db823caa</td><td>Facial Expression Analysis
</td></tr><tr><td>d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea</td><td>Face Recognition with Patterns of Oriented
<br/>Edge Magnitudes
<br/>1 Vesalis Sarl, Clermont Ferrand, France
@@ -2277,8 +2614,35 @@
<br/>BASIC RESEARCH
<br/>METHODS AND
<br/>PROCEDURES
+</td></tr><tr><td>be48b5dcd10ab834cd68d5b2a24187180e2b408f</td><td>FOR PERSONAL USE ONLY
+<br/>Constrained Low-rank Learning Using Least
+<br/>Squares Based Regularization
+</td></tr><tr><td>be437b53a376085b01ebd0f4c7c6c9e40a4b1a75</td><td>ISSN (Online) 2321 – 2004
+<br/>ISSN (Print) 2321 – 5526
+<br/> INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+<br/> Vol. 4, Issue 5, May 2016
+<br/>IJIREEICE
+<br/>Face Recognition and Retrieval Using Cross
+<br/>Age Reference Coding
+<br/> BE, DSCE, Bangalore1
+<br/>Assistant Professor, DSCE, Bangalore2
+</td></tr><tr><td>bebea83479a8e1988a7da32584e37bfc463d32d4</td><td>Discovery of Latent 3D Keypoints via
+<br/>End-to-end Geometric Reasoning
+<br/>Google AI
</td></tr><tr><td>bef503cdfe38e7940141f70524ee8df4afd4f954</td><td></td></tr><tr><td>beab10d1bdb0c95b2f880a81a747f6dd17caa9c2</td><td>DeepDeblur: Fast one-step blurry face images restoration
<br/>Tsinghua Unversity
+</td></tr><tr><td>b331ca23aed90394c05f06701f90afd550131fe3</td><td>Zhou et al. EURASIP Journal on Image and Video Processing (2018) 2018:49
+<br/>https://doi.org/10.1186/s13640-018-0287-5
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Double regularized matrix factorization for
+<br/>image classification and clustering
+<br/>Open Access
+</td></tr><tr><td>b3cb91a08be4117d6efe57251061b62417867de9</td><td>T. Swearingen and A. Ross. "A label propagation approach for predicting missing biographic labels in
+<br/>A Label Propagation Approach for
+<br/>Predicting Missing Biographic Labels
+<br/>in Face-Based Biometric Records
</td></tr><tr><td>b3c60b642a1c64699ed069e3740a0edeabf1922c</td><td>Max-Margin Object Detection
</td></tr><tr><td>b3f7c772acc8bc42291e09f7a2b081024a172564</td><td> www.ijmer.com Vol. 3, Issue. 5, Sep - Oct. 2013 pp-3225-3230 ISSN: 2249-6645
<br/>International Journal of Modern Engineering Research (IJMER)
@@ -2287,6 +2651,15 @@
<br/><b></b><br/>
</td></tr><tr><td>b32631f456397462b3530757f3a73a2ccc362342</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
<br/>3069
+</td></tr><tr><td>b3afa234996f44852317af382b98f5f557cab25a</td><td></td></tr><tr><td>df90850f1c153bfab691b985bfe536a5544e438b</td><td>FACE TRACKING ALGORITHM ROBUST TO POSE,
+<br/>ILLUMINATION AND FACE EXPRESSION CHANGES: A 3D
+<br/>PARAMETRIC MODEL APPROACH
+<br/><b></b><br/>via Bramante 65 - 26013, Crema (CR), Italy
+<br/>Luigi Arnone, Fabrizio Beverina
+<br/>STMicroelectronics - Advanced System Technology Group
+<br/>via Olivetti 5 - 20041, Agrate Brianza, Italy
+<br/>Keywords:
+<br/>Face tracking, expression changes, FACS, illumination changes.
</td></tr><tr><td>df8da144a695269e159fb0120bf5355a558f4b02</td><td>International Journal of Computer Applications (0975 – 8887)
<br/>International Conference on Recent Trends in engineering & Technology - 2013(ICRTET'2013)
<br/>Face Recognition using PCA and Eigen Face
@@ -2295,6 +2668,8 @@
<br/>Sinhgad Academy of Engineering
<br/>EXTC Department
<br/>Pune, India
+</td></tr><tr><td>df577a89830be69c1bfb196e925df3055cafc0ed</td><td>Shift: A Zero FLOP, Zero Parameter Alternative to Spatial Convolutions
+<br/>UC Berkeley
</td></tr><tr><td>dfabe7ef245ca68185f4fcc96a08602ee1afb3f7</td><td></td></tr><tr><td>df51dfe55912d30fc2f792561e9e0c2b43179089</td><td>Face Hallucination using Linear Models of Coupled
<br/>Sparse Support
<br/>grid and fuse them to suppress the aliasing caused by under-
@@ -2309,6 +2684,11 @@
<br/>Learning Deep Sharable and Structural
<br/>Detectors for Face Alignment
</td></tr><tr><td>dfa80e52b0489bc2585339ad3351626dee1a8395</td><td>Human Action Forecasting by Learning Task Grammars
+</td></tr><tr><td>dfecaedeaf618041a5498cd3f0942c15302e75c3</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Recursive Framework for Expression Recognition: From
+<br/>Web Images to Deep Models to Game Dataset
+<br/>Received: date / Accepted: date
</td></tr><tr><td>df5fe0c195eea34ddc8d80efedb25f1b9034d07d</td><td>Robust Modified Active Shape Model for Automatic Facial Landmark
<br/>Annotation of Frontal Faces
</td></tr><tr><td>df674dc0fc813c2a6d539e892bfc74f9a761fbc8</td><td>IOSR Journal of Computer Engineering (IOSR-JCE)
@@ -2319,15 +2699,22 @@
<br/> 1.Ms.Dhanashri Shirkey , 2Prof.Dr.S.R.Gupta,
<br/>M.E(Scholar),Department Computer Science & Engineering, PRMIT & R, Badnera
<br/>Asstt.Prof. Department Computer Science & Engineering, PRMIT & R, Badnera
+</td></tr><tr><td>da4170c862d8ae39861aa193667bfdbdf0ecb363</td><td>Multi-task CNN Model for Attribute Prediction
</td></tr><tr><td>da15344a4c10b91d6ee2e9356a48cb3a0eac6a97</td><td></td></tr><tr><td>da5bfddcfe703ca60c930e79d6df302920ab9465</td><td></td></tr><tr><td>dac2103843adc40191e48ee7f35b6d86a02ef019</td><td>854
<br/>Unsupervised Celebrity Face Naming in Web Videos
</td></tr><tr><td>dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e</td><td>RECOGNIZING EMOTIONS IN SPONTANEOUS FACIAL EXPRESSIONS
<br/>Institut f¨ur Nachrichtentechnik
<br/>Universit¨at Karlsruhe (TH), Germany
-</td></tr><tr><td>daba8f0717f3f47c272f018d0a466a205eba6395</td><td></td></tr><tr><td>b41374f4f31906cf1a73c7adda6c50a78b4eb498</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+</td></tr><tr><td>daba8f0717f3f47c272f018d0a466a205eba6395</td><td></td></tr><tr><td>daefac0610fdeff415c2a3f49b47968d84692e87</td><td>New Orleans, Louisiana, June 1 - 6, 2018. ©2018 Association for Computational Linguistics
+<br/>Proceedings of NAACL-HLT 2018, pages 1481–1491
+<br/>1481
+</td></tr><tr><td>b49affdff167f5d170da18de3efa6fd6a50262a2</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+<br/>(2008)"
+</td></tr><tr><td>b41374f4f31906cf1a73c7adda6c50a78b4eb498</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
<br/>Iterative Gaussianization: From ICA to
<br/>Random Rotations
-</td></tr><tr><td>b4d7ca26deb83cec1922a6964c1193e8dd7270e7</td><td></td></tr><tr><td>b40290a694075868e0daef77303f2c4ca1c43269</td><td>第 40 卷 第 4 期
+</td></tr><tr><td>b4d7ca26deb83cec1922a6964c1193e8dd7270e7</td><td></td></tr><tr><td>b4ee64022cc3ccd14c7f9d4935c59b16456067d3</td><td>Unsupervised Cross-Domain Image Generation
+</td></tr><tr><td>b40290a694075868e0daef77303f2c4ca1c43269</td><td>第 40 卷 第 4 期
<br/>2014 年 4 月
<br/>自 动 化 学 报
<br/>ACTA AUTOMATICA SINICA
@@ -2345,6 +2732,9 @@
<br/>DOI 10.3724/SP.J.1004.2014.00615
<br/>Combining Local and Global Information for Hair Shape Modeling
<br/>AI Hai-Zhou1
+</td></tr><tr><td>a2359c0f81a7eb032cff1fe45e3b80007facaa2a</td><td>Towards Structured Analysis of Broadcast Badminton Videos
+<br/>C.V.Jawahar
+<br/>CVIT, KCIS, IIIT Hyderabad
</td></tr><tr><td>a2d9c9ed29bbc2619d5e03320e48b45c15155195</td><td></td></tr><tr><td>a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d</td><td></td></tr><tr><td>a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa</td><td>Intention from Motion
</td></tr><tr><td>a50b4d404576695be7cd4194a064f0602806f3c4</td><td>In Proceedings of BMVC, Edimburgh, UK, September 2006
<br/>Efficiently estimating facial expression and
@@ -2379,7 +2769,9 @@
<br/>Driver Assistance: Issues, Algorithms,
<br/>and On-Road Evaluations
<br/>Mohan Manubhai Trivedi, Fellow, IEEE
-</td></tr><tr><td>a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be</td><td></td></tr><tr><td>a503eb91c0bce3a83bf6f524545888524b29b166</td><td></td></tr><tr><td>bd9eb65d9f0df3379ef96e5491533326e9dde315</td><td></td></tr><tr><td>bd07d1f68486052b7e4429dccecdb8deab1924db</td><td></td></tr><tr><td>bd8e2d27987be9e13af2aef378754f89ab20ce10</td><td></td></tr><tr><td>bd2d7c7f0145028e85c102fe52655c2b6c26aeb5</td><td>Attribute-based People Search: Lessons Learnt from a
+</td></tr><tr><td>a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be</td><td></td></tr><tr><td>a503eb91c0bce3a83bf6f524545888524b29b166</td><td></td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>Moments in Time Dataset: one million
+<br/>videos for event understanding
+</td></tr><tr><td>bd9eb65d9f0df3379ef96e5491533326e9dde315</td><td></td></tr><tr><td>bd07d1f68486052b7e4429dccecdb8deab1924db</td><td></td></tr><tr><td>bd8e2d27987be9e13af2aef378754f89ab20ce10</td><td></td></tr><tr><td>bd2d7c7f0145028e85c102fe52655c2b6c26aeb5</td><td>Attribute-based People Search: Lessons Learnt from a
<br/>Practical Surveillance System
<br/>Rogerio Feris
<br/>IBM Watson
@@ -2389,20 +2781,91 @@
<br/>Lisa Brown
<br/>IBM Watson
<br/>IBM Watson
+</td></tr><tr><td>bdbba95e5abc543981fb557f21e3e6551a563b45</td><td>International Journal of Computational Intelligence and Applications
+<br/>Vol. 17, No. 2 (2018) 1850008 (15 pages)
+<br/>© The Author(s)
+<br/>DOI: 10.1142/S1469026818500086
+<br/>Speeding up the Hyperparameter Optimization of Deep
+<br/>Convolutional Neural Networks
+<br/>Knowledge Technology, Department of Informatics
+<br/>Universität Hamburg
+<br/>Vogt-Kölln-Str. 30, Hamburg 22527, Germany
+<br/>Received 15 August 2017
+<br/>Accepted 23 March 2018
+<br/>Published 18 June 2018
+<br/>Most learning algorithms require the practitioner to manually set the values of many
+<br/>hyperparameters before the learning process can begin. However, with modern algorithms, the
+<br/>evaluation of a given hyperparameter setting can take a considerable amount of time and the
+<br/>search space is often very high-dimensional. We suggest using a lower-dimensional
+<br/>representation of the original data to quickly identify promising areas in the hyperparameter
+<br/>space. This information can then be used to initialize the optimization algorithm for the
+<br/>original, higher-dimensional data. We compare this approach with the standard procedure of
+<br/>optimizing the hyperparameters only on the original input.
+<br/>We perform experiments with various state-of-the-art hyperparameter optimization
+<br/>algorithms such as random search, the tree of Parzen estimators (TPE), sequential model-based
+<br/>algorithm configuration (SMAC), and a genetic algorithm (GA). Our experiments indicate that
+<br/>it is possible to speed up the optimization process by using lower-dimensional data
+<br/>representations at the beginning, while increasing the dimensionality of the input later in the
+<br/>optimization process. This is independent of the underlying optimization procedure, making the
+<br/>approach promising for many existing hyperparameter optimization algorithms.
+<br/>Keywords: Hyperparameter optimization; hyperparameter importance; convolutional neural
+<br/>networks; genetic algorithm; Bayesian optimization.
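+<br/>Read as pseudocode, the coarse-to-fine strategy sketched in the abstract might look
+<br/>as follows in C++ (Config, evaluate(), and randomConfig() are our own hypothetical
+<br/>placeholders; random search stands in for any of the optimizers named above):
+<br/>struct Config { double learningRate; int nFilters; };
+<br/>double evaluate(const Config& c, int inputDim); // hypothetical: validation score
+<br/>Config randomConfig();                          // hypothetical sampler
+<br/>Config coarseToFine(int lowDim, int fullDim, int budget) {
+<br/>  Config best = randomConfig();
+<br/>  double bestScore = evaluate(best, lowDim);
+<br/>  // Phase 1: cheap evaluations on the lower-dimensional representation.
+<br/>  for (int i = 1; i < budget; ++i) {
+<br/>    Config c = randomConfig();
+<br/>    double s = evaluate(c, lowDim);
+<br/>    if (s > bestScore) { best = c; bestScore = s; }
+<br/>  }
+<br/>  // Phase 2: the winner initializes the continued search on the
+<br/>  // original, higher-dimensional input (fullDim).
+<br/>  return best;
+<br/>}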
+<br/>1. Introduction
+<br/>The performance of many contemporary machine learning algorithms depends crucially
+<br/>on the specific initialization of hyperparameters such as the general architecture,
+<br/>the learning rate, regularization parameters, and many others.1,2 Indeed,
+<br/>This is an Open Access article published by World Scientific Publishing Company. It is distributed under
+<br/>the terms of the Creative Commons Attribution 4.0 (CC-BY) License. Further distribution of this work is
+<br/>permitted, provided the original work is properly cited.
+<br/>Int. J. Comp. Intel. Appl. 2018.17. </td></tr><tr><td>d1dfdc107fa5f2c4820570e369cda10ab1661b87</td><td>Super SloMo: High Quality Estimation of Multiple Intermediate Frames
+<br/>for Video Interpolation
+<br/>Erik Learned-Miller1
+<br/>1UMass Amherst
+<br/>2NVIDIA 3UC Merced
+</td></tr><tr><td>d1a43737ca8be02d65684cf64ab2331f66947207</td><td>IJB–S: IARPA Janus Surveillance Video Benchmark
+<br/>Kevin O’Connor
</td></tr><tr><td>d1082eff91e8009bf2ce933ac87649c686205195</td><td>(will be inserted by the editor)
<br/>Pruning of Error Correcting Output Codes by
<br/>Optimization of Accuracy-Diversity Trade off
<br/>S¨ureyya ¨Oz¨o˘g¨ur Aky¨uz · Terry
<br/>Windeatt · Raymond Smith
<br/>Received: date / Accepted: date
-</td></tr><tr><td>d6102a7ddb19a185019fd2112d2f29d9258f6dec</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+</td></tr><tr><td>d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0</td><td></td></tr><tr><td>d6102a7ddb19a185019fd2112d2f29d9258f6dec</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
<br/>3721
</td></tr><tr><td>d6bfa9026a563ca109d088bdb0252ccf33b76bc6</td><td>Unsupervised Temporal Segmentation of Facial Behaviour
<br/>Department of Computer Science and Engineering, IIT Kanpur
-</td></tr><tr><td>d6fb606e538763282e3942a5fb45c696ba38aee6</td><td></td></tr><tr><td>bcc172a1051be261afacdd5313619881cbe0f676</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+</td></tr><tr><td>d6fb606e538763282e3942a5fb45c696ba38aee6</td><td></td></tr><tr><td>bc9003ad368cb79d8a8ac2ad025718da5ea36bc4</td><td>Technische Universität München
+<br/>Bildverstehen und Intelligente Autonome Systeme
+<br/>Facial Expression Recognition With A
+<br/>Three-Dimensional Face Model
+<br/>Complete reprint of the dissertation approved by the Faculty of
+<br/>Informatics of the Technische Universität München for the award of
+<br/>the academic degree of Doktor der Naturwissenschaften.
+<br/>Chair:
+<br/>Univ.-Prof. Dr. Johann Schlichter
+<br/>Examiners of the dissertation: 1. Univ.-Prof. Dr. Bernd Radig (i.R.)
+<br/>2. Univ.-Prof. Gudrun J. Klinker, Ph.D.
+<br/>The dissertation was submitted to the Technische Universität München
+<br/>on 04.07.2011 and accepted by the Faculty of Informatics on 02.12.2011.
+</td></tr><tr><td>bcc346f4a287d96d124e1163e4447bfc47073cd8</td><td></td></tr><tr><td>bcc172a1051be261afacdd5313619881cbe0f676</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
<br/>2197
<br/>ICASSP 2017
-</td></tr><tr><td>bcfeac1e5c31d83f1ed92a0783501244dde5a471</td><td></td></tr><tr><td>bc2852fa0a002e683aad3fb0db5523d1190d0ca5</td><td></td></tr><tr><td>bcb99d5150d792001a7d33031a3bd1b77bea706b</td><td></td></tr><tr><td>bcac3a870501c5510df80c2a5631f371f2f6f74a</td><td>CVPR
+</td></tr><tr><td>bcfeac1e5c31d83f1ed92a0783501244dde5a471</td><td></td></tr><tr><td>bc2852fa0a002e683aad3fb0db5523d1190d0ca5</td><td></td></tr><tr><td>bcb99d5150d792001a7d33031a3bd1b77bea706b</td><td></td></tr><tr><td>bc811a66855aae130ca78cd0016fd820db1603ec</td><td>Towards three-dimensional face recognition in the real
+<br/>To cite this version:
+<br/>HAL Id: tel-00998798
+<br/>https://tel.archives-ouvertes.fr/tel-00998798
+<br/>Submitted on 2 Jun 2014
+</td></tr><tr><td>bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab</td><td>MoCoGAN: Decomposing Motion and Content for Video Generation
+<br/>Snap Research
+<br/>NVIDIA
+</td></tr><tr><td>bcac3a870501c5510df80c2a5631f371f2f6f74a</td><td>CVPR
<br/>#1387
<br/>000
<br/>001
@@ -2464,7 +2927,17 @@
<br/>Structured Face Hallucination
<br/>Anonymous CVPR submission
<br/>Paper ID 1387
-</td></tr><tr><td>aed321909bb87c81121c841b21d31509d6c78f69</td><td></td></tr><tr><td>ae936628e78db4edb8e66853f59433b8cc83594f</td><td></td></tr><tr><td>aebb9649bc38e878baef082b518fa68f5cda23a5</td><td>
+</td></tr><tr><td>aed321909bb87c81121c841b21d31509d6c78f69</td><td></td></tr><tr><td>ae936628e78db4edb8e66853f59433b8cc83594f</td><td></td></tr><tr><td>ae2cf545565c157813798910401e1da5dc8a6199</td><td>Mahkonen et al. EURASIP Journal on Image and Video
+<br/>Processing (2018) 2018:61
+<br/>https://doi.org/10.1186/s13640-018-0303-9
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Cascade of Boolean detector
+<br/>combinations
+</td></tr><tr><td>aebb9649bc38e878baef082b518fa68f5cda23a5</td><td>
+</td></tr><tr><td>aeff403079022683b233decda556a6aee3225065</td><td>DeepFace: Face Generation using Deep Learning
</td></tr><tr><td>ae753fd46a744725424690d22d0d00fb05e53350</td><td>000
<br/>001
<br/>002
@@ -2532,12 +3005,50 @@
</td></tr><tr><td>d83ae5926b05894fcda0bc89bdc621e4f21272da</td><td>version of the following thesis:
<br/>Frugal Forests: Learning a Dynamic and Cost Sensitive
<br/>Feature Extraction Policy for Anytime Activity Classification
+</td></tr><tr><td>d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d</td><td>Robust Face Recognition via Multimodal Deep
+<br/>Face Representation
</td></tr><tr><td>ab8f9a6bd8f582501c6b41c0e7179546e21c5e91</td><td>Nonparametric Face Verification Using a Novel
<br/>Face Representation
+</td></tr><tr><td>ab58a7db32683aea9281c188c756ddf969b4cdbd</td><td>Efficient Solvers for Sparse Subspace Clustering
+</td></tr><tr><td>ab989225a55a2ddcd3b60a99672e78e4373c0df1</td><td>Sample, Computation vs Storage Tradeoffs for
+<br/>Classification Using Tensor Subspace Models
</td></tr><tr><td>ab6776f500ed1ab23b7789599f3a6153cdac84f7</td><td>International Journal of Scientific & Engineering Research, Volume 6, Issue 4, April-2015 1212
<br/>ISSN 2229-5518
<br/>A Survey on Various Facial Expression
<br/>Techniques
+</td></tr><tr><td>ab2b09b65fdc91a711e424524e666fc75aae7a51</td><td>Multi-modal Biomarkers to Discriminate Cognitive State*
+<br/>1MIT Lincoln Laboratory, Lexington, Massachusetts, USA
+<br/>2USARIEM, 3NSRDEC
+<br/>1. Introduction
+<br/>Multimodal biomarkers based on behavioral, neurophysiological, and cognitive measurements have
+<br/>recently obtained increasing popularity in the detection of cognitive stress- and neurological-based
+<br/>disorders. Such conditions are significantly and adversely affecting human performance and quality
+<br/>of life for a large fraction of the world’s population. Example modalities used in detection of these
+<br/>conditions include voice, facial expression, physiology, eye tracking, gait, and EEG analysis.
+<br/>Toward the goal of finding simple, noninvasive means to detect, predict and monitor cognitive
+<br/>stress and neurological conditions, MIT Lincoln Laboratory is developing biomarkers that satisfy
+<br/>three criteria. First, we seek biomarkers that reflect core components of cognitive status such as
+<br/>working memory capacity, processing speed, attention, and arousal. Second, and as importantly, we
+<br/>seek biomarkers that reflect timing and coordination relations both within components of each
+<br/>modality and across different modalities. This is based on the hypothesis that neural coordination
+<br/>across different parts of the brain is essential in cognition (Figure 1). An example of timing and
+<br/>coordination within a modality is the set of finely timed and synchronized physiological
+<br/>components of speech production, while an example of coordination across modalities is the timing
+<br/>and synchrony that occurs across speech and facial expression while speaking. Third, we seek
+<br/>multimodal biomarkers that contribute in a complementary fashion under various channel and
+<br/>background conditions. In this chapter, as an illustration of this biomarker approach we focus on
+<br/>cognitive stress and the particular case of detecting different cognitive load levels. We also briefly
+<br/>show how similar feature-extraction principles can be applied to a neurological condition through
+<br/>the example of major depression disorder (MDD). MDD is one of several neurological disorders
+<br/>where multi-modal biomarkers based on principles of timing and coordination are important for
+<br/>detection [11]-[22]. In our cognitive load experiments, we use two easily obtained noninvasive
+<br/>modalities, voice and face, and show how these two modalities can be fused to produce results on
+<br/>par with more invasive, “gold-standard” EEG measurements. Vocal and facial biomarkers will also
+<br/>be used in our MDD case study. In both application areas we focus on timing and coordination
+<br/>relations within the components of each modality.
</td></tr><tr><td>ab87dfccb1818bdf0b41d732da1f9335b43b74ae</td><td>SUBMITTED TO IEEE TRANSACTIONS ON SIGNAL PROCESSING
<br/>Structured Dictionary Learning for Classification
</td></tr><tr><td>ab1dfcd96654af0bf6e805ffa2de0f55a73c025d</td><td></td></tr><tr><td>abeda55a7be0bbe25a25139fb9a3d823215d7536</td><td>UNIVERSITATPOLITÈCNICADECATALUNYAProgramadeDoctorat:AUTOMÀTICA,ROBÒTICAIVISIÓTesiDoctoralUnderstandingHuman-CentricImages:FromGeometrytoFashionEdgarSimoSerraDirectors:FrancescMorenoNoguerCarmeTorrasMay2015 </td></tr><tr><td>ab1900b5d7cf3317d17193e9327d57b97e24d2fc</td><td></td></tr><tr><td>ab8fb278db4405f7db08fa59404d9dd22d38bc83</td><td>UNIVERSITÉ DE GENÈVE
@@ -2555,11 +3066,18 @@
<br/>GENÈVE
<br/>Repro-Mail - Université de Genève
<br/>2011
-</td></tr><tr><td>e5737ffc4e74374b0c799b65afdbf0304ff344cb</td><td></td></tr><tr><td>e27c92255d7ccd1860b5fb71c5b1277c1648ed1e</td><td></td></tr><tr><td>e200c3f2849d56e08056484f3b6183aa43c0f13a</td><td></td></tr><tr><td>f437b3884a9e5fab66740ca2a6f1f3a5724385ea</td><td>Human Identification Technical Challenges
+</td></tr><tr><td>e5737ffc4e74374b0c799b65afdbf0304ff344cb</td><td></td></tr><tr><td>e5823a9d3e5e33e119576a34cb8aed497af20eea</td><td>DocFace+: ID Document to Selfie* Matching
+</td></tr><tr><td>e5dfd17dbfc9647ccc7323a5d62f65721b318ba9</td><td></td></tr><tr><td>e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69</td><td>Human Activity Recognition Based on Wearable
+<br/>Sensor Data: A Standardization of the
+<br/>State-of-the-Art
+<br/>Smart Surveillance Interest Group, Computer Science Department
+<br/>Universidade Federal de Minas Gerais, Brazil
+</td></tr><tr><td>e27c92255d7ccd1860b5fb71c5b1277c1648ed1e</td><td></td></tr><tr><td>e200c3f2849d56e08056484f3b6183aa43c0f13a</td><td></td></tr><tr><td>f437b3884a9e5fab66740ca2a6f1f3a5724385ea</td><td>Human Identification Technical Challenges
<br/>DARPA
<br/>3701 N. Fairfax Dr
<br/>Arlington, VA 22203
-</td></tr><tr><td>f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0</td><td></td></tr><tr><td>f4373f5631329f77d85182ec2df6730cbd4686a9</td><td>Soft Computing manuscript No.
+</td></tr><tr><td>f442a2f2749f921849e22f37e0480ac04a3c3fec</td><td></td></tr><tr><td>f4f6fc473effb063b7a29aa221c65f64a791d7f4</td><td>Facial expression recognition in the wild based on multimodal texture features
+<br/>Bo Sun, Liandong Li, Guoyan Zhou, Jun He, J. Electron. Imaging 25(6), 061407 (2016), doi: 10.1117/1.JEI.25.6.061407. </td></tr><tr><td>f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0</td><td></td></tr><tr><td>f4373f5631329f77d85182ec2df6730cbd4686a9</td><td>Soft Computing manuscript No.
<br/>(will be inserted by the editor)
<br/>Recognizing Gender from Human Facial Regions using
<br/>Genetic Algorithm
@@ -2571,8 +3089,38 @@
</td></tr><tr><td>f3fcaae2ea3e998395a1443c87544f203890ae15</td><td></td></tr><tr><td>f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7</td><td>NosePose: a competitive, landmark-free
<br/>methodology for head pose estimation in the wild
<br/>IMAGO Research Group - Universidade Federal do Paran´a
-</td></tr><tr><td>f355e54ca94a2d8bbc598e06e414a876eb62ef99</td><td></td></tr><tr><td>ebedc841a2c1b3a9ab7357de833101648281ff0e</td><td></td></tr><tr><td>eb526174fa071345ff7b1fad1fad240cd943a6d7</td><td>Deeply Vulnerable – A Study of the Robustness of Face Recognition to
+</td></tr><tr><td>f355e54ca94a2d8bbc598e06e414a876eb62ef99</td><td></td></tr><tr><td>f3ea181507db292b762aa798da30bc307be95344</td><td>Covariance Pooling for Facial Expression Recognition
+<br/>†Computer Vision Lab, ETH Zurich, Switzerland
+<br/>‡VISICS, KU Leuven, Belgium
+</td></tr><tr><td>f3cf10c84c4665a0b28734f5233d423a65ef1f23</td><td>Title
+<br/>Temporal Exemplar-based Bayesian Networks for facial
+<br/>expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>Proceedings - 7Th International Conference On Machine
+<br/>Learning And Applications, Icmla 2008, 2008, p. 16-22
+<br/>Issued Date
+<br/>2008
+<br/>URL
+<br/>http://hdl.handle.net/10722/61208
+</td></tr><tr><td>f3b7938de5f178e25a3cf477107c76286c0ad691</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, MARCH 2017
+<br/>Object Detection with Deep Learning: A Review
+</td></tr><tr><td>ebedc841a2c1b3a9ab7357de833101648281ff0e</td><td></td></tr><tr><td>eb526174fa071345ff7b1fad1fad240cd943a6d7</td><td>Deeply Vulnerable – A Study of the Robustness of Face Recognition to
<br/>Presentation Attacks
+</td></tr><tr><td>eb566490cd1aa9338831de8161c6659984e923fd</td><td>From Lifestyle Vlogs to Everyday Interactions
+<br/>EECS Department, UC Berkeley
</td></tr><tr><td>eb9312458f84a366e98bd0a2265747aaed40b1a6</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
<br/>IV - 473
<br/>ICIP 2007
@@ -2583,7 +3131,9 @@
<br/>representation learning using various deep networks
<br/>School of Electrical Engineering, KAIST,
<br/>Guseong-dong, Yuseong-gu, Dajeon, Rep. of Korea
-</td></tr><tr><td>ebb9d53668205c5797045ba130df18842e3eadef</td><td></td></tr><tr><td>c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e</td><td>The International Journal of Multimedia & Its Applications (IJMA) Vol.5, No.5, October 2013
+</td></tr><tr><td>ebb9d53668205c5797045ba130df18842e3eadef</td><td></td></tr><tr><td>eb48a58b873295d719827e746d51b110f5716d6c</td><td>Face Alignment Using K-cluster Regression Forests
+<br/>With Weighted Splitting
+</td></tr><tr><td>c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e</td><td>The International Journal of Multimedia & Its Applications (IJMA) Vol.5, No.5, October 2013
<br/>DYNEMO: A VIDEO DATABASE OF NATURAL FACIAL
<br/>EXPRESSIONS OF EMOTIONS
<br/>1LIP, Univ. Grenoble Alpes, BP 47 - 38040 Grenoble Cedex 9, France
@@ -2593,9 +3143,28 @@
<br/>EMPIRICAL STUDY
</td></tr><tr><td>c758b9c82b603904ba8806e6193c5fefa57e9613</td><td>Heterogeneous Face Recognition with CNNs
<br/>INRIA Grenoble, Laboratoire Jean Kuntzmann
+</td></tr><tr><td>c7c8d150ece08b12e3abdb6224000c07a6ce7d47</td><td>DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+</td></tr><tr><td>c038beaa228aeec174e5bd52460f0de75e9cccbe</td><td>Temporal Segment Networks for Action
+<br/>Recognition in Videos
</td></tr><tr><td>c043f8924717a3023a869777d4c9bee33e607fb5</td><td>Emotion Separation Is Completed Early and It Depends
<br/>on Visual Field Presentation
<br/><b>Lab for Human Brain Dynamics, RIKEN Brain Science Institute, Wakoshi, Saitama, Japan, 2 Lab for Human Brain Dynamics, AAI Scientific Cultural Services Ltd., Nicosia</b><br/>Cyprus
+</td></tr><tr><td>c05a7c72e679745deab9c9d7d481f7b5b9b36bdd</td><td>NPS-CS-11-005
+<br/>NAVAL
+<br/>POSTGRADUATE
+<br/>SCHOOL
+<br/>MONTEREY, CALIFORNIA
+<br/>by
+<br/>BIOMETRIC CHALLENGES FOR FUTURE DEPLOYMENTS:
+<br/>A STUDY OF THE IMPACT OF GEOGRAPHY, CLIMATE, CULTURE,
+<br/> AND SOCIAL CONDITIONS ON THE EFFECTIVE
+<br/>COLLECTION OF BIOMETRICS
+<br/>April 2011
+<br/>Approved for public release; distribution is unlimited
</td></tr><tr><td>c02847a04a99a5a6e784ab580907278ee3c12653</td><td>Fine Grained Video Classification for
<br/>Endangered Bird Species Protection
<br/>Non-Thesis MS Final Report
@@ -2625,6 +3194,9 @@
<br/>because a higher resolution image will require larger filters and deeper networks, which are in turn hard to
<br/>train [3]. So it is not clear whether the low resolution will pose a challenge for the fine-grained
<br/>classification task. Last but not least, there is not a large training database like PASCAL or MNIST
+</td></tr><tr><td>c0c8d720658374cc1ffd6116554a615e846c74b5</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Modeling Multimodal Clues in a Hybrid Deep
+<br/>Learning Framework for Video Classification
</td></tr><tr><td>c0d5c3aab87d6e8dd3241db1d931470c15b9e39d</td><td></td></tr><tr><td>eee8a37a12506ff5df72c402ccc3d59216321346</td><td>Uredniki:
<br/>dr. Tomaž Erjavec
<br/>Odsek za tehnologije znanja
@@ -2665,6 +3237,22 @@
<br/>Video and Display Processing
<br/>Philips Research USA
<br/>Briarcliff Manor, NY 10510
+</td></tr><tr><td>eedfb384a5e42511013b33104f4cd3149432bd9e</td><td>Multimodal Probabilistic Person
+<br/>Tracking and Identification
+<br/>in Smart Spaces
+<br/>Dissertation approved by the Faculty of Informatics of the
+<br/>Universität Fridericiana zu Karlsruhe (TH) for the award of the
+<br/>academic degree of Doktor der Ingenieurwissenschaften,
+<br/>submitted from Karlsruhe
+<br/>Date of the oral examination: 20.11.2009
+<br/>First reviewer: Prof. Dr. A. Waibel
+<br/>Second reviewer: Prof. Dr. R. Stiefelhagen
</td></tr><tr><td>c9424d64b12a4abe0af201e7b641409e182babab</td><td>Article
<br/>Which, When, and How: Hierarchical Clustering with
<br/>Human–Machine Cooperation
@@ -2689,8 +3277,14 @@
<br/>for Solving Nonlinear Least Squares
<br/>Problems in Computer Vision
</td></tr><tr><td>fdf533eeb1306ba418b09210387833bdf27bb756</td><td>951
+</td></tr><tr><td>fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3</td><td>Im2Flow: Motion Hallucination from Static Images for Action Recognition
+<br/>UT Austin
+<br/>UT Austin
+<br/>UT Austin
</td></tr><tr><td>fdfaf46910012c7cdf72bba12e802a318b5bef5a</td><td>Computerized Face Recognition in Renaissance
<br/>Portrait Art
+</td></tr><tr><td>fd15e397629e0241642329fc8ee0b8cd6c6ac807</td><td>Semi-Supervised Clustering with Neural Networks
+<br/>IIIT-Delhi, India
</td></tr><tr><td>fdca08416bdadda91ae977db7d503e8610dd744f</td><td>
<br/>ICT-2009.7.1
<br/>KSERA Project
@@ -2708,7 +3302,51 @@
<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
<br/>agreement n°2010-248085.
-</td></tr><tr><td>f2e9494d0dca9fb6b274107032781d435a508de6</td><td></td></tr><tr><td>f2c568fe945e5743635c13fe5535af157b1903d1</td><td></td></tr><tr><td>f26097a1a479fb6f32b27a93f8f32609cfe30fdc</td><td></td></tr><tr><td>f214bcc6ecc3309e2efefdc21062441328ff6081</td><td></td></tr><tr><td>f519723238701849f1160d5a9cedebd31017da89</td><td>Impact of multi-focused images on recognition of soft biometric traits
+</td></tr><tr><td>fdaf65b314faee97220162980e76dbc8f32db9d6</td><td>Accepted Manuscript
+<br/>Face recognition using both visible light image and near-infrared image and a deep
+<br/>network
+<br/>PII: S2468-2322(17)30014-8
+<br/>DOI: 10.1016/j.trit.2017.03.001
+<br/>Reference: TRIT 41
+<br/>To appear in:
+<br/>CAAI Transactions on Intelligence Technology
+<br/>Received Date: 30 January 2017
+<br/>Accepted Date: 28 March 2017
+<br/>Please cite this article as: K. Guo, S. Wu, Y. Xu, Face recognition using both visible light image and
+<br/>near-infrared image and a deep network, CAAI Transactions on Intelligence Technology (2017), doi:
+<br/>10.1016/j.trit.2017.03.001.
+</td></tr><tr><td>f2e9494d0dca9fb6b274107032781d435a508de6</td><td></td></tr><tr><td>f2c568fe945e5743635c13fe5535af157b1903d1</td><td></td></tr><tr><td>f26097a1a479fb6f32b27a93f8f32609cfe30fdc</td><td></td></tr><tr><td>f231046d5f5d87e2ca5fae88f41e8d74964e8f4f</td><td>
+</td></tr><tr><td>f214bcc6ecc3309e2efefdc21062441328ff6081</td><td></td></tr><tr><td>f5770dd225501ff3764f9023f19a76fad28127d4</td><td>Real Time Online Facial Expression Transfer
+<br/>with Single Video Camera
+</td></tr><tr><td>f519723238701849f1160d5a9cedebd31017da89</td><td>Impact of multi-focused images on recognition of soft biometric traits
<br/>aEURECOM, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia
<br/>
<br/>Antipolis cedex, FRANCE
@@ -2719,17 +3357,35 @@
<br/>SEARCH
<br/>#Student,Cse, CIET, Lam,Guntur, India
<br/>* Assistant Professort,Cse, CIET, Lam,Guntur , India
-</td></tr><tr><td>e3657ab4129a7570230ff25ae7fbaccb4ba9950c</td><td></td></tr><tr><td>e315959d6e806c8fbfc91f072c322fb26ce0862b</td><td>An Efficient Face Recognition System Based on Sub-Window
+</td></tr><tr><td>e393a038d520a073b9835df7a3ff104ad610c552</td><td>Automatic temporal segment
+<br/>detection via bilateral long short-
+<br/>term memory recurrent neural
+<br/>networks
+<br/>detection via bilateral long short-term memory recurrent neural networks,” J.
+<br/>Electron. Imaging 26(2), 020501 (2017), doi: 10.1117/1.JEI.26.2.020501.
+</td></tr><tr><td>e3657ab4129a7570230ff25ae7fbaccb4ba9950c</td><td></td></tr><tr><td>e315959d6e806c8fbfc91f072c322fb26ce0862b</td><td>An Efficient Face Recognition System Based on Sub-Window
<br/>International Journal of Soft Computing and Engineering (IJSCE)
<br/>ISSN: 2231-2307, Volume-1, Issue-6, January 2012
<br/>Extraction Algorithm
+</td></tr><tr><td>e3c011d08d04c934197b2a4804c90be55e21d572</td><td>How to Train Triplet Networks with 100K Identities?
+<br/>Orion Star
+<br/>Beijing, China
+<br/>Orion Star
+<br/>Beijing, China
+<br/>Orion Star
+<br/>Beijing, China
</td></tr><tr><td>e39a0834122e08ba28e7b411db896d0fdbbad9ba</td><td>1368
<br/>Maximum Likelihood Estimation of Depth Maps
<br/>Using Photometric Stereo
</td></tr><tr><td>e3917d6935586b90baae18d938295e5b089b5c62</td><td>152
<br/>Face Localization and Authentication
<br/>Using Color and Depth Images
-</td></tr><tr><td>cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2</td><td></td></tr><tr><td>cf875336d5a196ce0981e2e2ae9602580f3f6243</td><td>7 What 1
+</td></tr><tr><td>cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2</td><td></td></tr><tr><td>cfffae38fe34e29d47e6deccfd259788176dc213</td><td>TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, DECEMBER 2012
+<br/>Matrix Completion for Weakly-supervised
+<br/>Multi-label Image Classification
+</td></tr><tr><td>cfd4004054399f3a5f536df71f9b9987f060f434</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. ??, NO. ??, ?? 20??
+<br/>Person Recognition in Personal Photo Collections
+</td></tr><tr><td>cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce</td><td></td></tr><tr><td>cf875336d5a196ce0981e2e2ae9602580f3f6243</td><td>7 What 1
<br/>Rosalind W. Picard
<br/>It Mean for a Computer to "Have" Emotions?
<br/>There is a lot of talk about giving machines emotions, some of
@@ -2775,6 +3431,17 @@
<br/>´Ecole Polytechnique de Montr´eal,
<br/>Qu´ebec, Canada
<br/>Qu´ebec, Canada
+</td></tr><tr><td>cfa92e17809e8d20ebc73b4e531a1b106d02b38c</td><td>Advances in Data Analysis and Classification manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Parametric Classification with Soft Labels using the
+<br/>Evidential EM Algorithm
+<br/>Linear Discriminant Analysis vs. Logistic Regression
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>cfdc632adcb799dba14af6a8339ca761725abf0a</td><td>Probabilistic Formulations of Regression with Mixed
+<br/>Guidance
+</td></tr><tr><td>cfc30ce53bfc204b8764ebb764a029a8d0ad01f4</td><td>Regularizing Deep Neural Networks by Noise:
+<br/>Its Interpretation and Optimization
+<br/>Dept. of Computer Science and Engineering, POSTECH, Korea
</td></tr><tr><td>cf86616b5a35d5ee777585196736dfafbb9853b5</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
<br/>Learning Multiscale Active Facial Patches for
<br/>Expression Analysis
@@ -2782,7 +3449,18 @@
<br/>Detecting Social Relationships in First-Person Views
<br/>Universit`a degli Studi di Modena e Reggio Emilia
<br/>Via Vignolese 905, 41125 Modena - Italy
-</td></tr><tr><td>cac8bb0e393474b9fb3b810c61efdbc2e2c25c29</td><td></td></tr><tr><td>cadba72aa3e95d6dcf0acac828401ddda7ed8924</td><td>THÈSE PRÉSENTÉE À LA FACULTÉ DES SCIENCES
+</td></tr><tr><td>cac8bb0e393474b9fb3b810c61efdbc2e2c25c29</td><td></td></tr><tr><td>cad24ba99c7b6834faf6f5be820dd65f1a755b29</td><td>Understanding hand-object
+<br/>manipulation by modeling the
+<br/>contextual relationship between actions,
+<br/>grasp types and object attributes
+</td></tr><tr><td>cadba72aa3e95d6dcf0acac828401ddda7ed8924</td><td>THÈSE PRÉSENTÉE À LA FACULTÉ DES SCIENCES
<br/>POUR L’OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
<br/>Algorithms and VLSI Architectures
<br/>for Low-Power Mobile Face Verification
@@ -2820,6 +3498,20 @@
<br/>Weighted Feature Extraction and Fuzzy Classifier
</td></tr><tr><td>e4391993f5270bdbc621b8d01702f626fba36fc2</td><td>Author manuscript, published in "18th Scandinavian Conference on Image Analysis (2013)"
<br/> DOI : 10.1007/978-3-642-38886-6_31
+</td></tr><tr><td>e4d8ba577cabcb67b4e9e1260573aea708574886</td><td>An Intelligent Recommendation System Based on Video
+<br/>Lectures for Distance Education
+<br/>Gaspare Giuliano Elias Bruno
+<br/>Doctoral thesis presented to the Graduate Program in Systems
+<br/>Engineering and Computer Science, COPPE, of the Universidade
+<br/>Federal do Rio de Janeiro, in partial fulfillment of the
+<br/>requirements for the degree of Doctor of Systems Engineering
+<br/>and Computer Science.
+<br/>Advisors: Edmundo Albuquerque de
+<br/>Souza e Silva
+<br/>Rosa Maria Meri Leão
+<br/>Rio de Janeiro
+<br/>January 2016
</td></tr><tr><td>e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd</td><td>Accepted in Pattern Recognition Letters
<br/>Pattern Recognition Letters
<br/>journal homepage: www.elsevier.com
@@ -2833,6 +3525,10 @@
</td></tr><tr><td>fe9c460d5ca625402aa4d6dd308d15a40e1010fa</td><td>Neural Architecture for Temporal Emotion
<br/>Classification
<br/>Universit¨at Ulm, Neuroinformatik, Germany
+</td></tr><tr><td>fe7c0bafbd9a28087e0169259816fca46db1a837</td><td></td></tr><tr><td>fe48f0e43dbdeeaf4a03b3837e27f6705783e576</td><td></td></tr><tr><td>fea83550a21f4b41057b031ac338170bacda8805</td><td>Learning a Metric Embedding
+<br/>for Face Recognition
+<br/>using the Multibatch Method
+<br/>Orcam Ltd., Jerusalem, Israel
</td></tr><tr><td>feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc</td><td>EURECOM
<br/>Multimedia Communications Department
<br/>and
@@ -2848,7 +3544,13 @@
<br/>Last update June 1st, 2011
<br/>1EURECOM’s research is partially supported by its industrial members: BMW Group, Cisco,
<br/>Monaco Telecom, Orange, SAP, SFR, Sharp, STEricsson, Swisscom, Symantec, Thales.
-</td></tr><tr><td>fe108803ee97badfa2a4abb80f27fa86afd9aad9</td><td></td></tr><tr><td>c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d</td><td>Modeling for part-based visual object
+</td></tr><tr><td>fe108803ee97badfa2a4abb80f27fa86afd9aad9</td><td></td></tr><tr><td>fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139</td><td>Rahman et al. EURASIP Journal on Image and Video Processing (2015) 2015:35
+<br/>DOI 10.1186/s13640-015-0090-5
+<br/>RESEARCH
+<br/>Open Access
+<br/>Bayesian face recognition using 2D
+<br/>Gaussian-Hermite moments
+</td></tr><tr><td>c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d</td><td>Modeling for part-based visual object
<br/>detection based on local features
<br/>Von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
<br/>der Rheinisch-Westf¨alischen Technischen Hochschule Aachen
@@ -2863,17 +3565,66 @@
<br/>Tag der m¨undlichen Pr¨ufung: 28. September 2011
<br/>Diese Dissertation ist auf den Internetseiten der
<br/>Hochschulbibliothek online verf¨ugbar.
+</td></tr><tr><td>c86e6ed734d3aa967deae00df003557b6e937d3d</td><td>Generative Adversarial Networks with
+<br/>Decoder-Encoder Output Noise
+<br/>conditional distribution of their neighbors. In [32], Portilla and
+<br/>Simoncelli proposed a parametric texture model based on joint
+<br/>statistics, which uses a decomposition method that is called
+<br/>steerable pyramid decomposition to decompose the texture
+<br/>of images. An example-based super-resolution algorithm [11]
+<br/>was proposed in 2002, which uses a Markov network to model
+<br/>the spatial relationship between the pixels of an image. A
+<br/>scene completion algorithm [16] was proposed in 2007, which
+<br/>applied a semantic scene match technique. These traditional
+<br/>algorithms can be applied to particular image generation tasks,
+<br/>such as texture synthesis and super-resolution. Their common
+<br/>characteristic is that they predict images pixel by pixel
+<br/>rather than generating an image as a whole, and their basic
+<br/>idea is to interpolate from the existing parts of the images.
+<br/>Here, the problem is: given a set of images, can we generate
+<br/>entirely new images with the same distribution as the given
+<br/>ones?
</td></tr><tr><td>c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3</td><td>LOCATING FACIAL LANDMARKS WITH BINARY MAP CROSS-CORRELATIONS
<br/>J´er´emie Nicolle
<br/>K´evin Bailly
<br/>Univ. Pierre & Marie Curie, ISIR - CNRS UMR 7222, F-75005, Paris - France
-</td></tr><tr><td>c82c147c4f13e79ad49ef7456473d86881428b89</td><td></td></tr><tr><td>c8adbe00b5661ab9b3726d01c6842c0d72c8d997</td><td>Deep Architectures for Face Attributes
+</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Surveillance Face Recognition Challenge
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>c82c147c4f13e79ad49ef7456473d86881428b89</td><td></td></tr><tr><td>c84233f854bbed17c22ba0df6048cbb1dd4d3248</td><td>Exploring Locally Rigid Discriminative
+<br/>Patches for Learning Relative Attributes
+<br/>http://researchweb.iiit.ac.in/~yashaswi.verma/
+<br/>http://www.iiit.ac.in/~jawahar/
+<br/>CVIT
+<br/>IIIT-Hyderabad, India
+<br/>http://cvit.iiit.ac.in
+</td></tr><tr><td>c8adbe00b5661ab9b3726d01c6842c0d72c8d997</td><td>Deep Architectures for Face Attributes
<br/>Computer Vision and Machine Learning Group, Flickr, Yahoo,
</td></tr><tr><td>fb4545782d9df65d484009558e1824538030bbb1</td><td></td></tr><tr><td>fb5280b80edcf088f9dd1da769463d48e7b08390</td><td></td></tr><tr><td>fba464cb8e3eff455fe80e8fb6d3547768efba2f</td><td>
<br/>International Journal of Engineering and Applied Sciences (IJEAS)
<br/> ISSN: 2394-3661, Volume-3, Issue-2, February 2016
<br/>Survey Paper on Emotion Recognition
<br/>
+</td></tr><tr><td>fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59</td><td>Reading Hidden Emotions: Spontaneous
+<br/>Micro-expression Spotting and Recognition
+</td></tr><tr><td>fb9ad920809669c1b1455cc26dbd900d8e719e61</td><td>3D Gaze Estimation from Remote RGB-D Sensors
+<br/>Thesis No. 6680 (2015)
+<br/>presented on 9 October 2015
+<br/>at the Faculty of Engineering Sciences and Techniques
+<br/>Idiap Laboratory
+<br/>Doctoral Program in Electrical Engineering
+<br/>École Polytechnique Fédérale de Lausanne
+<br/>for the award of the degree of Docteur ès Sciences
+<br/>accepted on the recommendation of the jury:
+<br/>Prof. K. Aminian, jury president
+<br/>Dr J.-M. Odobez, thesis director
+<br/>Prof. L.-Ph. Morency, examiner
+<br/>Prof. D. Witzner Hansen, examiner
+<br/>Dr R. Boulic, examiner
+<br/>Switzerland
+<br/>2015
</td></tr><tr><td>edef98d2b021464576d8d28690d29f5431fd5828</td><td>Pixel-Level Alignment of Facial Images
<br/>for High Accuracy Recognition
<br/>Using Ensemble of Patches
@@ -2958,14 +3709,33 @@
<br/>Subspace Regression: Predicting a Subspace from one Sample
<br/>Anonymous CVPR submission
<br/>Paper ID 1369
+</td></tr><tr><td>c11eb653746afa8148dc9153780a4584ea529d28</td><td>Global and Local Consistent Wavelet-domain Age
+<br/>Synthesis
+</td></tr><tr><td>c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee</td><td>Robust Facial Landmark Localization Based on
</td></tr><tr><td>c17a332e59f03b77921942d487b4b102b1ee73b6</td><td>Learning an appearance-based gaze estimator
<br/>from one million synthesised images
<br/>Tadas Baltruˇsaitis2
-</td></tr><tr><td>c1e76c6b643b287f621135ee0c27a9c481a99054</td><td></td></tr><tr><td>ec22eaa00f41a7f8e45ed833812d1ac44ee1174e</td><td></td></tr><tr><td>ec54000c6c0e660dd99051bdbd7aed2988e27ab8</td><td>TWO IN ONE: JOINT POSE ESTIMATION AND FACE RECOGNITION WITH P2CA1
+</td></tr><tr><td>c1e76c6b643b287f621135ee0c27a9c481a99054</td><td></td></tr><tr><td>c6f3399edb73cfba1248aec964630c8d54a9c534</td><td>A Comparison of CNN-based Face and Head Detectors for
+<br/>Real-Time Video Surveillance Applications
+<br/>1 École de technologie supérieure, Université du Québec, Montreal, Canada
+<br/>2 Genetec Inc., Montreal, Canada
+</td></tr><tr><td>c62c07de196e95eaaf614fb150a4fa4ce49588b4</td><td>Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+<br/>1078
+</td></tr><tr><td>ec1e03ec72186224b93b2611ff873656ed4d2f74</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>3D Reconstruction of “In-the-Wild” Faces in
+<br/>Images and Videos
+</td></tr><tr><td>ec22eaa00f41a7f8e45ed833812d1ac44ee1174e</td><td></td></tr><tr><td>ec54000c6c0e660dd99051bdbd7aed2988e27ab8</td><td>TWO IN ONE: JOINT POSE ESTIMATION AND FACE RECOGNITION WITH P2CA1
<br/>*Dept. Teoria del Senyal i Comunicacions - Universitat Politècnica de Catalunya, Barcelona, Spain
<br/>+Dipartimento di Elettronica e Informazione - Politecnico di Milano, Meiland, Italy
</td></tr><tr><td>ec0104286c96707f57df26b4f0a4f49b774c486b</td><td>758
<br/>An Ensemble CNN2ELM for Age Estimation
+</td></tr><tr><td>4e32fbb58154e878dd2fd4b06398f85636fd0cf4</td><td>A Hierarchical Matcher using Local Classifier Chains
+<br/>L. Zhang and I.A. Kakadiaris
+<br/>Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204
+</td></tr><tr><td>4e27fec1703408d524d6b7ed805cdb6cba6ca132</td><td>SSD-Sface: Single shot multibox detector for small faces
+<br/>C. Thuis
+</td></tr><tr><td>4e6c9be0b646d60390fe3f72ce5aeb0136222a10</td><td>Long-term Temporal Convolutions
+<br/>for Action Recognition
</td></tr><tr><td>4e444db884b5272f3a41e4b68dc0d453d4ec1f4c</td><td></td></tr><tr><td>4ef0a6817a7736c5641dc52cbc62737e2e063420</td><td>International Journal of Advanced Computer Research (ISSN (Print): 2249-7277 ISSN (Online): 2277-7970)
<br/>Volume-4 Number-4 Issue-17 December-2014
<br/>Study of Face Recognition Techniques
@@ -2994,7 +3764,35 @@
<br/>http://www.informatik.uni-hamburg.de/WTM
</td></tr><tr><td>20e504782951e0c2979d9aec88c76334f7505393</td><td>Robust LSTM-Autoencoders for Face De-Occlusion
<br/>in the Wild
-</td></tr><tr><td>20767ca3b932cbc7b8112db21980d7b9b3ea43a3</td><td></td></tr><tr><td>20c2a5166206e7ffbb11a23387b9c5edf42b5230</td><td></td></tr><tr><td>2098983dd521e78746b3b3fa35a22eb2fa630299</td><td></td></tr><tr><td>20532b1f80b509f2332b6cfc0126c0f80f438f10</td><td>A deep matrix factorization method for learning
+</td></tr><tr><td>20ade100a320cc761c23971d2734388bfe79f7c5</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Subspace Clustering via Good Neighbors
+</td></tr><tr><td>20767ca3b932cbc7b8112db21980d7b9b3ea43a3</td><td></td></tr><tr><td>20c2a5166206e7ffbb11a23387b9c5edf42b5230</td><td></td></tr><tr><td>2098983dd521e78746b3b3fa35a22eb2fa630299</td><td></td></tr><tr><td>206e24f7d4b3943b35b069ae2d028143fcbd0704</td><td>Learning Structure and Strength of CNN Filters for Small Sample Size Training
+<br/>IIIT-Delhi, India
+</td></tr><tr><td>2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b</td><td>TRANSACTIONS ON AUDIO, SPEECH, AND LANGUAGE PROCESSING, VOL. 23, NO. 4, APRIL 2015
+<br/>Co-Localization of Audio Sources in Images Using
+<br/>Binaural Features and Locally-Linear Regression
+<br/>∗ INRIA Grenoble Rhône-Alpes, Montbonnot Saint-Martin, France
+<br/>† Univ. Grenoble Alpes, GIPSA-Lab, France
+<br/>‡ Dept. Electrical Eng., Technion-Israel Inst. of Technology, Haifa, Israel
+</td></tr><tr><td>206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8</td><td></td></tr><tr><td>20111924fbf616a13d37823cd8712a9c6b458cd6</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 130 – No.11, November2015
+<br/>Linear Regression Line based Partial Face Recognition
+<br/>Naveena M.
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>P. Nagabhushan
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>images. In
+</td></tr><tr><td>20532b1f80b509f2332b6cfc0126c0f80f438f10</td><td>A deep matrix factorization method for learning
<br/>attribute representations
<br/>Bj¨orn W. Schuller, Senior member, IEEE
</td></tr><tr><td>205af28b4fcd6b569d0241bb6b255edb325965a4</td><td>Intel Serv Robotics (2008) 1:143–157
@@ -3031,6 +3829,20 @@
<br/>ADVISERS:
</td></tr><tr><td>18d5b0d421332c9321920b07e0e8ac4a240e5f1f</td><td>Collaborative Representation Classification
<br/>Ensemble for Face Recognition
+</td></tr><tr><td>18d51a366ce2b2068e061721f43cb798177b4bb7</td><td>Cognition and Emotion
+<br/>ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+<br/>Looking into your eyes: observed pupil size
+<br/>influences approach-avoidance responses
+<br/>eyes: observed pupil size influences approach-avoidance responses, Cognition and Emotion, DOI:
+<br/>10.1080/02699931.2018.1472554
+<br/>To link to this article: https://doi.org/10.1080/02699931.2018.1472554
+<br/>Published online: 11 May 2018.
</td></tr><tr><td>1885acea0d24e7b953485f78ec57b2f04e946eaf</td><td>Combining Local and Global Features for 3D Face Tracking
<br/>Megvii (face++) Research
</td></tr><tr><td>184750382fe9b722e78d22a543e852a6290b3f70</td><td></td></tr><tr><td>18a849b1f336e3c3b7c0ee311c9ccde582d7214f</td><td>Int J Comput Vis
@@ -3045,7 +3857,38 @@
<br/>THE BASICS
</td></tr><tr><td>185360fe1d024a3313042805ee201a75eac50131</td><td>299
<br/>Person De-Identification in Videos
-</td></tr><tr><td>18dfc2434a95f149a6cbb583cca69a98c9de9887</td><td></td></tr><tr><td>27d709f7b67204e1e5e05fe2cfac629afa21699d</td><td></td></tr><tr><td>27cccf992f54966feb2ab4831fab628334c742d8</td><td>International Journal of Computer Applications (0975 – 8887)
+</td></tr><tr><td>18dfc2434a95f149a6cbb583cca69a98c9de9887</td><td></td></tr><tr><td>27d709f7b67204e1e5e05fe2cfac629afa21699d</td><td></td></tr><tr><td>275b5091c50509cc8861e792e084ce07aa906549</td><td>Institut für Informatik
+<br/>der Technischen
+<br/>Universität München
+<br/>Dissertation
+<br/>Leveraging the User’s Face as a Known Object
+<br/>in Handheld Augmented Reality
+<br/>Sebastian Bernhard Knorr
+</td></tr><tr><td>270733d986a1eb72efda847b4b55bc6ba9686df4</td><td>We are IntechOpen,
+<br/>the first native scientific
+<br/>publisher of Open Access books
+<br/>3,350
+<br/>108,000
+<br/>1.7 M
+<br/>Open access books available
+<br/>International authors and editors
+<br/>Downloads
+<br/>Our authors are among the
+<br/>151
+<br/>Countries delivered to
+<br/>TOP 1%
+<br/>12.2%
+<br/>most cited scientists
+<br/>Contributors from top 500 universities
+<br/>Selection of our books indexed in the Book Citation Index
+<br/>in Web of Science™ Core Collection (BKCI)
+<br/>Interested in publishing with us?
+<br/>Numbers displayed above are based on latest data collected.
+<br/>For more information visit www.intechopen.com
+</td></tr><tr><td>27da432cf2b9129dce256e5bf7f2f18953eef5a5</td><td></td></tr><tr><td>2770b095613d4395045942dc60e6c560e882f887</td><td>GridFace: Face Rectification via Learning Local
+<br/>Homography Transformations
+<br/>Face++, Megvii Inc.
+</td></tr><tr><td>27cccf992f54966feb2ab4831fab628334c742d8</td><td>International Journal of Computer Applications (0975 – 8887)
<br/>Volume 64– No.18, February 2013
<br/>Facial Expression Recognition by Statistical, Spatial
<br/>Features and using Decision Tree
@@ -3086,6 +3929,8 @@
</td></tr><tr><td>4b04247c7f22410681b6aab053d9655cf7f3f888</td><td>Robust Face Recognition by Constrained Part-based
<br/>Alignment
</td></tr><tr><td>4b60e45b6803e2e155f25a2270a28be9f8bec130</td><td>Attribute Based Object Identification
+</td></tr><tr><td>4b48e912a17c79ac95d6a60afed8238c9ab9e553</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Minimum Margin Loss for Deep Face Recognition
</td></tr><tr><td>4b5eeea5dd8bd69331bd4bd4c66098b125888dea</td><td>Human Activity Recognition Using Conditional
<br/>Random Fields and Privileged Information
<br/>submitted to
@@ -3111,9 +3956,15 @@
<br/>JUNE 2008
<br/>Tied Factor Analysis for Face Recognition
<br/>across Large Pose Differences
-</td></tr><tr><td>111a9645ad0108ad472b2f3b243ed3d942e7ff16</td><td>Facial Expression Classification Using
+</td></tr><tr><td>112780a7fe259dc7aff2170d5beda50b2bfa7bda</td><td></td></tr><tr><td>111a9645ad0108ad472b2f3b243ed3d942e7ff16</td><td>Facial Expression Classification Using
<br/>Combined Neural Networks
<br/>DEE/PUC-Rio, Marquês de São Vicente 225, Rio de Janeiro – RJ - Brazil
+</td></tr><tr><td>111d0b588f3abbbea85d50a28c0506f74161e091</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 134 – No.10, January 2016
+<br/>Facial Expression Recognition from Visual Information
+<br/>using Curvelet Transform
+<br/>Surabhi Group of Institution Bhopal
</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>Labeled Faces in the Wild: A Survey
</td></tr><tr><td>7d73adcee255469aadc5e926066f71c93f51a1a5</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
<br/>1283
@@ -3160,6 +4011,8 @@
<br/>Published online: 14 January 2010
<br/>© Springer Science+Business Media, LLC 2010
</td></tr><tr><td>290136947fd44879d914085ee51d8a4f433765fa</td><td>On a Taxonomy of Facial Features
+</td></tr><tr><td>2957715e96a18dbb5ed5c36b92050ec375214aa6</td><td>InclusiveFaceNet: Improving Face Attribute Detection
+<br/>with Race and Gender Diversity
</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>MS-Celeb-1M: A Dataset and Benchmark for
<br/>Large-Scale Face Recognition
<br/>Microsoft Research
@@ -3228,20 +4081,114 @@
</td></tr><tr><td>7c7b0550ec41e97fcfc635feffe2e53624471c59</td><td>1051-4651/14 $31.00 © 2014 IEEE
<br/>DOI 10.1109/ICPR.2014.124
<br/>660
-</td></tr><tr><td>7ce03597b703a3b6754d1adac5fbc98536994e8f</td><td></td></tr><tr><td>7c1e1c767f7911a390d49bed4f73952df8445936</td><td>NON-RIGID OBJECT DETECTION WITH LOCAL INTERLEAVED SEQUENTIAL ALIGNMENT (LISA)
+</td></tr><tr><td>7ce03597b703a3b6754d1adac5fbc98536994e8f</td><td></td></tr><tr><td>7c9a65f18f7feb473e993077d087d4806578214e</td><td>SpringerLink - Zeitschriftenbeitrag
+<br/>http://www.springerlink.com/content/93hr862660nl1164/?p=abe5352...
+<br/>Deutsch
+<br/>Deutsch
+<br/>Go
+<br/>Vorherige Beitrag Nächste Beitrag
+<br/>Beitrag markieren
+<br/>In den Warenkorb legen
+<br/>Zu gespeicherten Artikeln
+<br/>hinzufügen
+<br/>Permissions & Reprints
+<br/>Diesen Artikel empfehlen
+<br/>Ergebnisse
+<br/>finden
+<br/>Erweiterte Suche
+<br/>Go
+<br/>im gesamten Inhalt
+<br/>in dieser Zeitschrift
+<br/>in diesem Heft
+<br/>Diesen Beitrag exportieren
+<br/>Diesen Beitrag exportieren als RIS
+<br/>| Text
+<br/>Text
+<br/>PDF
+<br/>PDF ist das gebräuchliche Format
+<br/>für Online Publikationen. Die Größe
+<br/>dieses Dokumentes beträgt 564
+<br/>Kilobyte. Je nach Art Ihrer
+<br/>Internetverbindung kann der
+<br/>Download einige Zeit in Anspruch
+<br/>nehmen.
+<br/>öffnen: Gesamtdokument
+<br/>Publikationsart Subject Collections
+<br/>Zurück zu: Journal Issue
+<br/>Athens Authentication Point
+<br/>Zeitschriftenbeitrag
+<br/>Willkommen!
+<br/>Um unsere personalisierten
+<br/>Angebote nutzen zu können,
+<br/>müssen Sie angemeldet sein.
+<br/>Login
+<br/>Jetzt registrieren
+<br/>Zugangsdaten vergessen?
+<br/>Hilfe.
+<br/>Mein Menü
+<br/>Markierte Beiträge
+<br/>Alerts
+<br/>Meine Bestellungen
+<br/>Private emotions versus social interaction: a data-driven approach towards
+<br/>analysing emotion in speech
+<br/>Zeitschrift
+<br/>Verlag
+<br/>ISSN
+<br/>Heft
+<br/>Kategorie
+<br/>DOI
+<br/>Seiten
+<br/>Subject Collection
+<br/>SpringerLink Date
+<br/>User Modeling and User-Adapted Interaction
+<br/>Springer Netherlands
+<br/>0924-1868 (Print) 1573-1391 (Online)
+<br/>Volume 18, Numbers 1-2 / Februar 2008
+<br/>Original Paper
+<br/>10.1007/s11257-007-9039-4
+<br/>175-206
+<br/>Informatik
+<br/>Freitag, 12. Oktober 2007
+<br/>Gespeicherte Beiträge
+<br/>Alle
+<br/>Favoriten
+<br/>(1) Lehrstuhl für Mustererkennung, FAU Erlangen – Nürnberg, Martensstr. 3, 91058 Erlangen,
+<br/>Germany
+<br/>Received: 3 July 2006 Accepted: 14 January 2007 Published online: 12 October 2007
+</td></tr><tr><td>7c1e1c767f7911a390d49bed4f73952df8445936</td><td>NON-RIGID OBJECT DETECTION WITH LOCAL INTERLEAVED SEQUENTIAL ALIGNMENT (LISA)
<br/>Non-Rigid Object Detection with Local
<br/>Interleaved Sequential Alignment (LISA)
<br/>and Tomáš Svoboda, Member, IEEE
</td></tr><tr><td>7c349932a3d083466da58ab1674129600b12b81c</td><td></td></tr><tr><td>1648cf24c042122af2f429641ba9599a2187d605</td><td>Boosting Cross-Age Face Verification via Generative Age Normalization
<br/>Orange Labs, 4 rue Clos Courtel, 35512 Cesson-Sévigné, France
<br/>† Eurecom, 450 route des Chappes, 06410 Biot, France
+</td></tr><tr><td>162403e189d1b8463952fa4f18a291241275c354</td><td>Action Recognition with Spatio-Temporal
+<br/>Visual Attention on Skeleton Image Sequences
+<br/>With a strong ability to model sequential data, Recurrent Neural Networks (RNN) with
+<br/>Long Short-Term Memory (LSTM) neurons outperform the previous hand-crafted feature
+<br/>based methods [9], [10]. Each skeleton frame is converted into a feature vector and the
+<br/>whole sequence is fed into the RNN. Despite the strong ability to model temporal sequences,
+<br/>RNN structures lack the ability to efficiently learn the spatial relations between the joints.
+<br/>To better use spatial information, a hierarchical structure is proposed in [11], [12] that
+<br/>feeds the joints into the network as several pre-defined body part groups. However, the
+<br/>pre-defined body regions still limit the effectiveness of representing spatial relations.
+<br/>A spatio-temporal 2D LSTM (ST-LSTM) network [13] is proposed to learn the spatial and
+<br/>temporal relations simultaneously. Furthermore, a two-stream RNN structure [14] is
+<br/>proposed to learn the spatio-temporal relations with two RNN branches.
</td></tr><tr><td>160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b</td><td>Discriminant Multi-Label Manifold Embedding for Facial Action Unit
<br/>Detection
<br/>Signal Processing Laboratory (LTS5), École Polytechnique Fédérale de Lausanne, Switzerland
</td></tr><tr><td>16671b2dc89367ce4ed2a9c241246a0cec9ec10e</td><td>2006
<br/>Detecting the Number of Clusters
<br/>in n-Way Probabilistic Clustering
-</td></tr><tr><td>16892074764386b74b6040fe8d6946b67a246a0b</td><td></td></tr><tr><td>16395b40e19cbc6d5b82543039ffff2a06363845</td><td>Action Recognition in Video Using Sparse Coding and Relative Features
+</td></tr><tr><td>16de1324459fe8fdcdca80bba04c3c30bb789bdf</td><td></td></tr><tr><td>16892074764386b74b6040fe8d6946b67a246a0b</td><td></td></tr><tr><td>16395b40e19cbc6d5b82543039ffff2a06363845</td><td>Action Recognition in Video Using Sparse Coding and Relative Features
<br/>Analí Alfaro
<br/>P. Universidad Catolica de Chile
<br/>P. Universidad Catolica de Chile
@@ -3256,6 +4203,25 @@
<br/>and Timing of Smiles Perceived as Amused, Polite,
<br/>and Embarrassed/Nervous
<br/>© Springer Science+Business Media, LLC 2008
+</td></tr><tr><td>166186e551b75c9b5adcc9218f0727b73f5de899</td><td>Volume 4, Issue 2, February 2016
+<br/>International Journal of Advance Research in
+<br/>Computer Science and Management Studies
+<br/>Research Article / Survey Paper / Case Study
+<br/>Available online at: www.ijarcsms.com
+<br/>ISSN: 2321-7782 (Online)
+<br/>Automatic Age and Gender Recognition in Human Face Image
+<br/>Dataset using Convolutional Neural Network System
+<br/>Subhani Shaik1, Assoc. Prof & Head of the Department, Department of CSE,
+<br/>St.Mary’s Group of Institutions Guntur, Chebrolu(V&M), Guntur(Dt), Andhra Pradesh - India
+<br/>Associate Professor, Department of CSE,
+<br/>St.Mary’s Group of Institutions Guntur, Chebrolu(V&M), Guntur(Dt), Andhra Pradesh - India
</td></tr><tr><td>16d9b983796ffcd151bdb8e75fc7eb2e31230809</td><td>EUROGRAPHICS 2018 / D. Gutierrez and A. Sheffer
<br/>(Guest Editors)
<br/>Volume 37 (2018), Number 2
@@ -3264,6 +4230,10 @@
</td></tr><tr><td>1679943d22d60639b4670eba86665371295f52c3</td><td></td></tr><tr><td>169076ffe5e7a2310e98087ef7da25aceb12b62d</td><td></td></tr><tr><td>161eb88031f382e6a1d630cd9a1b9c4bc6b47652</td><td>1
<br/>Automatic Facial Expression Recognition
<br/>Using Features of Salient Facial Patches
+</td></tr><tr><td>4209783b0cab1f22341f0600eed4512155b1dee6</td><td>Accurate and Efficient Similarity Search for Large Scale Face Recognition
+<br/>BUPT
+<br/>BUPT
+<br/>BUPT
</td></tr><tr><td>42e3dac0df30d754c7c7dab9e1bb94990034a90d</td><td>PANDA: Pose Aligned Networks for Deep Attribute Modeling
<br/>2EECS, UC Berkeley
<br/>1Facebook AI Research
@@ -3302,7 +4272,23 @@
<br/>Factorization in the Presence of Outliers and
<br/>Missing Data
</td></tr><tr><td>89de30a75d3258816c2d4d5a733d2bef894b66b9</td><td></td></tr><tr><td>8913a5b7ed91c5f6dec95349fbc6919deee4fc75</td><td>BigBIRD: A Large-Scale 3D Database of Object Instances
-</td></tr><tr><td>45c340c8e79077a5340387cfff8ed7615efa20fd</td><td></td></tr><tr><td>45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8</td><td></td></tr><tr><td>4571626d4d71c0d11928eb99a3c8b10955a74afe</td><td>Geometry Guided Adversarial Facial Expression Synthesis
+</td></tr><tr><td>89d3a57f663976a9ac5e9cdad01267c1fc1a7e06</td><td>Neural Class-Specific Regression for face
+<br/>verification
+</td></tr><tr><td>891b10c4b3b92ca30c9b93170ec9abd71f6099c4</td><td>Facial landmark detection using structured output deep
+<br/>neural networks
+<br/>Soufiane Belharbi ∗1, Clément Chatelain∗1, Romain Hérault∗1, and Sébastien
+<br/>1LITIS EA 4108, INSA de Rouen, Saint Étienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Université de Rouen, France.
+<br/>September 24, 2015
+</td></tr><tr><td>45c340c8e79077a5340387cfff8ed7615efa20fd</td><td></td></tr><tr><td>45e7ddd5248977ba8ec61be111db912a4387d62f</td><td>CHEN ET AL.: ADVERSARIAL POSENET
+<br/>Adversarial Learning of Structure-Aware Fully
+<br/>Convolutional Networks for Landmark
+<br/>Localization
+</td></tr><tr><td>45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8</td><td></td></tr><tr><td>4560491820e0ee49736aea9b81d57c3939a69e12</td><td>Investigating the Impact of Data Volume and
+<br/>Domain Similarity on Transfer Learning
+<br/>Applications
+<br/>State Farm Insurance, Bloomington IL 61710, USA,
+</td></tr><tr><td>4571626d4d71c0d11928eb99a3c8b10955a74afe</td><td>Geometry Guided Adversarial Facial Expression Synthesis
<br/>1National Laboratory of Pattern Recognition, CASIA
<br/>2Center for Research on Intelligent Perception and Computing, CASIA
<br/>3Center for Excellence in Brain Science and Intelligence Technology, CAS
@@ -3317,7 +4303,19 @@
<br/>© EURASIP, 2011 - ISSN 2076-1465
<br/>19th European Signal Processing Conference (EUSIPCO 2011)
<br/>INTRODUCTION
-</td></tr><tr><td>4511e09ee26044cb46073a8c2f6e1e0fbabe33e8</td><td></td></tr><tr><td>1f8304f4b51033d2671147b33bb4e51b9a1e16fe</td><td>Noname manuscript No.
+</td></tr><tr><td>4511e09ee26044cb46073a8c2f6e1e0fbabe33e8</td><td></td></tr><tr><td>45a6333fc701d14aab19f9e2efd59fe7b0e89fec</td><td>HAND POSTURE DATASET CREATION FOR GESTURE
+<br/>RECOGNITION
+<br/>Luis Anton-Canalis
+<br/>Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+<br/>Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+<br/>Elena Sanchez-Nielsen
+<br/>Departamento de E.I.O. y Computacion
+<br/>38271 Universidad de La Laguna, Spain
+<br/>Keywords:
+<br/>Image understanding, Gesture recognition, Hand dataset.
+</td></tr><tr><td>1ffe20eb32dbc4fa85ac7844178937bba97f4bf0</td><td>Face Clustering: Representation and Pairwise
+<br/>Constraints
+</td></tr><tr><td>1f8304f4b51033d2671147b33bb4e51b9a1e16fe</td><td>Noname manuscript No.
<br/>(will be inserted by the editor)
<br/>Beyond Trees:
<br/>MAP Inference in MRFs via Outer-Planar Decomposition
@@ -3364,7 +4362,27 @@
<br/>Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
<br/>∗ Multimedia Communications Department, EURECOM
<br/>450 Route des Chappes, 06410 Biot, France
+</td></tr><tr><td>1fff309330f85146134e49e0022ac61ac60506a9</td><td>Data-Driven Sparse Sensor Placement for Reconstruction
+</td></tr><tr><td>7323b594d3a8508f809e276aa2d224c4e7ec5a80</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>An Experimental Evaluation of Covariates
+<br/>Effects on Unconstrained Face Verification
</td></tr><tr><td>732e8d8f5717f8802426e1b9debc18a8361c1782</td><td>Unimodal Probability Distributions for Deep Ordinal Classification
+</td></tr><tr><td>73ed64803d6f2c49f01cffef8e6be8fc9b5273b8</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Cooking in the kitchen: Recognizing and Segmenting Human
+<br/>Activities in Videos
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>7306d42ca158d40436cc5167e651d7ebfa6b89c1</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Transductive Zero-Shot Action Recognition by
+<br/>Word-Vector Embedding
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>734cdda4a4de2a635404e4c6b61f1b2edb3f501d</td><td>Tie and Guan EURASIP Journal on Image and Video Processing 2013, 2013:8
+<br/>http://jivp.eurasipjournals.com/content/2013/1/8
+<br/>RESEARCH
+<br/>Open Access
+<br/>Automatic landmark point detection and tracking
+<br/>for human facial expressions
</td></tr><tr><td>732686d799d760ccca8ad47b49a8308b1ab381fb</td><td>Running head: TEACHERS’ DIFFERING BEHAVIORS
<br/>1
<br/>Graduate School of Psychology
@@ -3377,27 +4395,62 @@
</td></tr><tr><td>73fbdd57270b9f91f2e24989178e264f2d2eb7ae</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
<br/>1945
<br/>ICASSP 2012
-</td></tr><tr><td>871f5f1114949e3ddb1bca0982086cc806ce84a8</td><td>Discriminative Learning of Apparel Features
+</td></tr><tr><td>73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c</td><td></td></tr><tr><td>871f5f1114949e3ddb1bca0982086cc806ce84a8</td><td>Discriminative Learning of Apparel Features
<br/>1 Computer Vision Laboratory, D-ITET, ETH Zürich, Switzerland
<br/>2 ESAT - PSI / IBBT, K.U. Leuven, Belgium
+</td></tr><tr><td>878169be6e2c87df2d8a1266e9e37de63b524ae7</td><td>CBMM Memo No. 089
+<br/>May 10, 2018
+<br/>Image interpretation above and below the object level
+</td></tr><tr><td>878301453e3d5cb1a1f7828002ea00f59cbeab06</td><td>Faceness-Net: Face Detection through
+<br/>Deep Facial Part Responses
+</td></tr><tr><td>87e592ee1a7e2d34e6b115da08700a1ae02e9355</td><td>Deep Pictorial Gaze Estimation
+<br/>AIT Lab, Department of Computer Science, ETH Zurich
</td></tr><tr><td>87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd</td><td>Craniofacial Image Analysis
-</td></tr><tr><td>80193dd633513c2d756c3f568ffa0ebc1bb5213e</td><td></td></tr><tr><td>804b4c1b553d9d7bae70d55bf8767c603c1a09e3</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+</td></tr><tr><td>8006219efb6ab76754616b0e8b7778dcfb46603d</td><td>CONTRIBUTIONS TO LARGE-SCALE LEARNING FOR IMAGE CLASSIFICATION
+<br/>Zeynep Akata, PhD Thesis
+<br/>l'École Doctorale Mathématiques, Sciences et Technologies de l'Information, Informatique de Grenoble </td></tr><tr><td>80193dd633513c2d756c3f568ffa0ebc1bb5213e</td><td></td></tr><tr><td>804b4c1b553d9d7bae70d55bf8767c603c1a09e3</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
<br/>1831
<br/>ICASSP 2016
</td></tr><tr><td>800cbbe16be0f7cb921842d54967c9a94eaa2a65</td><td>MULTIMODAL RECOGNITION OF
<br/>EMOTIONS
+</td></tr><tr><td>803c92a3f0815dbf97e30c4ee9450fd005586e1a</td><td>Max-Mahalanobis Linear Discriminant Analysis Networks
+</td></tr><tr><td>80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923</td><td>Research Article
+<br/>Soft Biometrics for a Socially Assistive Robotic
+<br/>Platform
+<br/>Open Access
</td></tr><tr><td>80a6bb337b8fdc17bffb8038f3b1467d01204375</td><td>Proceedings of the International Conference on Computer and Information Science and Technology
<br/>Ottawa, Ontario, Canada, May 11 – 12, 2015
<br/>Paper No. 126
<br/>Subspace LDA Methods for Solving the Small Sample Size
<br/>Problem in Face Recognition
<br/>101 KwanFu Rd., Sec. 2, Hsinchu, Taiwan
+</td></tr><tr><td>80097a879fceff2a9a955bf7613b0d3bfa68dc23</td><td>Active Self-Paced Learning for Cost-Effective and
+<br/>Progressive Face Identification
</td></tr><tr><td>74408cfd748ad5553cba8ab64e5f83da14875ae8</td><td>Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
<br/>and Evaluation
-</td></tr><tr><td>747d5fe667519acea1bee3df5cf94d9d6f874f20</td><td></td></tr><tr><td>74b0095944c6e29837c208307a67116ebe1231c8</td><td></td></tr><tr><td>74156a11c2997517061df5629be78428e1f09cbd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
+</td></tr><tr><td>747d5fe667519acea1bee3df5cf94d9d6f874f20</td><td></td></tr><tr><td>74dbe6e0486e417a108923295c80551b6d759dbe</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 45– No.11, May 2012
+<br/>An HMM based Model for Prediction of Emotional
+<br/>Composition of a Facial Expression using both
+<br/>Significant and Insignificant Action Units and
+<br/>Associated Gender Differences
+<br/>Department of Management and Information Systems Science,
+<br/>1603-1 Kamitomioka, Nagaoka, Niigata, Japan
+</td></tr><tr><td>747c25bff37b96def96dc039cc13f8a7f42dbbc7</td><td>EmoNets: Multimodal deep learning approaches for emotion
+<br/>recognition in video
+</td></tr><tr><td>74b0095944c6e29837c208307a67116ebe1231c8</td><td></td></tr><tr><td>74156a11c2997517061df5629be78428e1f09cbd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
<br/>978-1-5090-4846-5/16/$31.00 ©2016 IEEE
<br/>2784
-</td></tr><tr><td>745b42050a68a294e9300228e09b5748d2d20b81</td><td></td></tr><tr><td>7480d8739eb7ab97c12c14e75658e5444b852e9f</td><td>NEGREL ET AL.: REVISITED MLBOOST FOR FACE RETRIEVAL
+</td></tr><tr><td>745b42050a68a294e9300228e09b5748d2d20b81</td><td></td></tr><tr><td>749d605dd12a4af58de1fae6f5ef5e65eb06540e</td><td>Multi-Task Video Captioning with Video and Entailment Generation
+<br/>UNC Chapel Hill
+</td></tr><tr><td>74c19438c78a136677a7cb9004c53684a4ae56ff</td><td>RESOUND: Towards Action Recognition
+<br/>without Representation Bias
+<br/>UC San Diego
+</td></tr><tr><td>7480d8739eb7ab97c12c14e75658e5444b852e9f</td><td>NEGREL ET AL.: REVISITED MLBOOST FOR FACE RETRIEVAL
<br/>MLBoost Revisited: A Faster Metric
<br/>Learning Algorithm for Identity-Based Face
<br/>Retrieval
@@ -3426,6 +4479,24 @@
<br/>J. Paone, D. Bolme, R. Ferrell, Member, IEEE, D. Aykac, and
<br/>T. Karnowski, Member, IEEE
<br/>Oak Ridge National Laboratory, Oak Ridge, TN
+</td></tr><tr><td>1a849b694f2d68c3536ed849ed78c82e979d64d5</td><td>This is a repository copy of Symmetric Shape Morphing for 3D Face and Head Modelling.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/131760/
+<br/>Version: Accepted Version
+<br/>Proceedings Paper:
+<br/>Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634, Smith, William Alfred
+<br/>Peter orcid.org/0000-0002-6047-0413 et al. (1 more author) (2018) Symmetric Shape
+<br/>Morphing for 3D Face and Head Modelling. In: The 13th IEEE Conference on Automatic
+<br/>Face and Gesture Recognition. IEEE .
</td></tr><tr><td>1a3eee980a2252bb092666cf15dd1301fa84860e</td><td>PCA GAUSSIANIZATION FOR IMAGE PROCESSING
<br/>Image Processing Laboratory (IPL), Universitat de València
<br/>Catedrático A. Escardino - 46980 Paterna, València, Spain
@@ -3463,7 +4534,13 @@
</td></tr><tr><td>28bc378a6b76142df8762cd3f80f737ca2b79208</td><td>Understanding Objects in Detail with Fine-grained Attributes
<br/>Ross Girshick5
<br/>David Weiss7
-</td></tr><tr><td>287900f41dd880802aa57f602e4094a8a9e5ae56</td><td></td></tr><tr><td>28aa89b2c827e5dd65969a5930a0520fdd4a3dc7</td><td></td></tr><tr><td>28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68</td><td>Predicting User Annoyance Using Visual Attributes
+</td></tr><tr><td>287900f41dd880802aa57f602e4094a8a9e5ae56</td><td></td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td></td></tr><tr><td>2866cbeb25551257683cf28f33d829932be651fe</td><td>In Proceedings of the 2018 IEEE International Conference on Image Processing (ICIP)
+<br/>The final publication is available at: http://dx.doi.org/10.1109/ICIP.2018.8451026
+<br/>A TWO-STEP LEARNING METHOD FOR DETECTING LANDMARKS
+<br/>ON FACES FROM DIFFERENT DOMAINS
+<br/>Erickson R. Nascimento
+<br/>Universidade Federal de Minas Gerais (UFMG), Brazil
+</td></tr><tr><td>28aa89b2c827e5dd65969a5930a0520fdd4a3dc7</td><td></td></tr><tr><td>28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68</td><td>Predicting User Annoyance Using Visual Attributes
<br/>Virginia Tech
<br/>Goibibo
<br/>Virginia Tech
@@ -3495,7 +4572,10 @@
<br/>the name of the author
</td></tr><tr><td>176f26a6a8e04567ea71677b99e9818f8a8819d0</td><td>MEG: Multi-Expert Gender classification from
<br/>face images in a demographics-balanced dataset
-</td></tr><tr><td>17035089959a14fe644ab1d3b160586c67327db2</td><td></td></tr><tr><td>17aa78bd4331ef490f24bdd4d4cd21d22a18c09c</td><td></td></tr><tr><td>1742ffea0e1051b37f22773613f10f69d2e4ed2c</td><td></td></tr><tr><td>1791f790b99471fc48b7e9ec361dc505955ea8b1</td><td></td></tr><tr><td>174930cac7174257515a189cd3ecfdd80ee7dd54</td><td>Multi-view Face Detection Using Deep Convolutional
+</td></tr><tr><td>17035089959a14fe644ab1d3b160586c67327db2</td><td></td></tr><tr><td>17a995680482183f3463d2e01dd4c113ebb31608</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. Y, MONTH Z
+<br/>Structured Label Inference for
+<br/>Visual Understanding
+</td></tr><tr><td>17aa78bd4331ef490f24bdd4d4cd21d22a18c09c</td><td></td></tr><tr><td>17c0d99171efc957b88c31a465c59485ab033234</td><td></td></tr><tr><td>1742ffea0e1051b37f22773613f10f69d2e4ed2c</td><td></td></tr><tr><td>1791f790b99471fc48b7e9ec361dc505955ea8b1</td><td></td></tr><tr><td>174930cac7174257515a189cd3ecfdd80ee7dd54</td><td>Multi-view Face Detection Using Deep Convolutional
<br/>Neural Networks
<br/>Yahoo
<br/>Mohammad Saberian
@@ -3505,10 +4585,22 @@
</td></tr><tr><td>17fad2cc826d2223e882c9fda0715fcd5475acf3</td><td></td></tr><tr><td>1750db78b7394b8fb6f6f949d68f7c24d28d934f</td><td>Detecting Facial Retouching Using Supervised
<br/>Deep Learning
<br/>Bowyer, Fellow, IEEE
+</td></tr><tr><td>173657da03e3249f4e47457d360ab83b3cefbe63</td><td>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>Final Report
+<br/>3035140108
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
</td></tr><tr><td>7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889</td><td></td></tr><tr><td>7bfe085c10761f5b0cc7f907bdafe1ff577223e0</td><td></td></tr><tr><td>7b9b3794f79f87ca8a048d86954e0a72a5f97758</td><td>DOI 10.1515/jisys-2013-0016      Journal of Intelligent Systems 2013; 22(4): 365–415
<br/>Passing an Enhanced Turing Test –
<br/>Interacting with Lifelike Computer
<br/>Representations of Specific Individuals 
+</td></tr><tr><td>7b0f1fc93fb24630eb598330e13f7b839fb46cce</td><td>Learning to Find Eye Region Landmarks for Remote Gaze
+<br/>Estimation in Unconstrained Settings
+<br/>ETH Zurich
+<br/>MPI for Informatics
+<br/>MPI for Informatics
+<br/>ETH Zurich
</td></tr><tr><td>7bdcd85efd1e3ce14b7934ff642b76f017419751</td><td>289
<br/>Learning Discriminant Face Descriptor
</td></tr><tr><td>7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f</td><td>On the Design and Evaluation of Robust Head Pose for
@@ -3530,10 +4622,22 @@
<br/>Laboratory of Intelligent and
<br/>Safe Automobiles
<br/>UCSD - La Jolla, CA, USA
-</td></tr><tr><td>8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483</td><td></td></tr><tr><td>8f8c0243816f16a21dea1c20b5c81bc223088594</td><td></td></tr><tr><td>8f89aed13cb3555b56fccd715753f9ea72f27f05</td><td>Attended End-to-end Architecture for Age
+</td></tr><tr><td>8f772d9ce324b2ef5857d6e0b2a420bc93961196</td><td>MAHPOD et al.: CFDRNN
+<br/>Facial Landmark Point Localization using
+<br/>Coarse-to-Fine Deep Recurrent Neural Network
+</td></tr><tr><td>8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483</td><td></td></tr><tr><td>8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a</td><td>Feature Selection with Annealing for Big Data
+<br/>Learning
+</td></tr><tr><td>8f9c37f351a91ed416baa8b6cdb4022b231b9085</td><td>Generative Adversarial Style Transfer Networks for Face Aging
+<br/>Sveinn Palsson
+<br/>D-ITET, ETH Zurich
+<br/>Eirikur Agustsson
+<br/>D-ITET, ETH Zurich
+</td></tr><tr><td>8f8c0243816f16a21dea1c20b5c81bc223088594</td><td></td></tr><tr><td>8f89aed13cb3555b56fccd715753f9ea72f27f05</td><td>Attended End-to-end Architecture for Age
<br/>Estimation from Facial Expression Videos
</td></tr><tr><td>8f9f599c05a844206b1bd4947d0524234940803d</td><td></td></tr><tr><td>8fd9c22b00bd8c0bcdbd182e17694046f245335f</td><td>  
<br/>Recognizing Facial Expressions in Videos
+</td></tr><tr><td>8a866bc0d925dfd8bb10769b8b87d7d0ff01774d</td><td>WikiArt Emotions: An Annotated Dataset of Emotions Evoked by Art
+<br/>National Research Council Canada
</td></tr><tr><td>8a40b6c75dd6392ee0d3af73cdfc46f59337efa9</td><td></td></tr><tr><td>8a91ad8c46ca8f4310a442d99b98c80fb8f7625f</td><td>2592
<br/>2D Segmentation Using a Robust Active
<br/>Shape Model With the EM Algorithm
@@ -3545,7 +4649,7 @@
<br/>Detecting Visually Observable Disease
<br/>Symptoms from Faces
<br/>Open Access
-</td></tr><tr><td>7e8016bef2c180238f00eecc6a50eac473f3f138</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+</td></tr><tr><td>7e600faee0ba11467d3f7aed57258b0db0448a72</td><td></td></tr><tr><td>7e8016bef2c180238f00eecc6a50eac473f3f138</td><td>TECHNISCHE UNIVERSITÄT MÜNCHEN
<br/>Lehrstuhl für Mensch-Maschine-Kommunikation
<br/>Immersive Interactive Data Mining and Machine
<br/>Learning Algorithms for Big Data Visualization
@@ -3622,7 +4726,10 @@
</td></tr><tr><td>102e374347698fe5404e1d83f441630b1abf62d9</td><td>Facial Image Analysis for Fully-Automatic
<br/>Prediction of Difficult Endotracheal Intubation
</td></tr><tr><td>100641ed8a5472536dde53c1f50fa2dd2d4e9be9</td><td>Visual Attributes for Enhanced Human-Machine Communication*
-</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td></td></tr><tr><td>10e704c82616fb5d9c48e0e68ee86d4f83789d96</td><td></td></tr><tr><td>106732a010b1baf13c61d0994552aee8336f8c85</td><td>Expanded Parts Model for Semantic Description
+</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td></td></tr><tr><td>10e704c82616fb5d9c48e0e68ee86d4f83789d96</td><td></td></tr><tr><td>101569eeef2cecc576578bd6500f1c2dcc0274e2</td><td>Multiaccuracy: Black-Box Post-Processing for Fairness in
+<br/>Classification
+<br/>James Zou
+</td></tr><tr><td>106732a010b1baf13c61d0994552aee8336f8c85</td><td>Expanded Parts Model for Semantic Description
<br/>of Humans in Still Images
</td></tr><tr><td>10e70a34d56258d10f468f8252a7762950830d2b</td><td></td></tr><tr><td>102b27922e9bd56667303f986404f0e1243b68ab</td><td>Wang et al. Appl Inform (2017) 4:13
<br/>DOI 10.1186/s40535-017-0042-5
@@ -3707,6 +4814,9 @@
<br/>Google Inc.
<br/>Google Inc.
<br/>Google Inc.
+</td></tr><tr><td>197c64c36e8a9d624a05ee98b740d87f94b4040c</td><td>Regularized Greedy Column Subset Selection
+<br/>aDepartment of Computer Systems, Universidad Politécnica de Madrid
+<br/>bDepartment of Applied Mathematics, Universidad Politécnica de Madrid
</td></tr><tr><td>19d4855f064f0d53cb851e9342025bd8503922e2</td><td>Learning SURF Cascade for Fast and Accurate Object Detection
<br/>Intel Labs China
</td></tr><tr><td>19eb486dcfa1963c6404a9f146c378fc7ae3a1df</td><td></td></tr><tr><td>4c6daffd092d02574efbf746d086e6dc0d3b1e91</td><td></td></tr><tr><td>4c6e1840451e1f86af3ef1cb551259cb259493ba</td><td>HAND POSTURE DATASET CREATION FOR GESTURE
@@ -3717,7 +4827,7 @@
<br/>38271 Universidad de La Laguna, Spain
<br/>Keywords:
<br/>Image understanding, Gesture recognition, Hand dataset.
-</td></tr><tr><td>4c815f367213cc0fb8c61773cd04a5ca8be2c959</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+</td></tr><tr><td>4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc</td><td></td></tr><tr><td>4c815f367213cc0fb8c61773cd04a5ca8be2c959</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
<br/>2470
<br/>ICASSP 2010
</td></tr><tr><td>4c4236b62302957052f1bbfbd34dbf71ac1650ec</td><td>SEMI-SUPERVISED FACE RECOGNITION WITH LDA SELF-TRAINING
@@ -3749,6 +4859,12 @@
<br/>Interactions
<br/>Prepared for:
<br/>Office of Naval Research
+</td></tr><tr><td>26e570049aaedcfa420fc8c7b761bc70a195657c</td><td>J Sign Process Syst
+<br/>DOI 10.1007/s11265-017-1276-0
+<br/>Hybrid Facial Regions Extraction for Micro-expression
+<br/>Recognition System
+<br/>Received: 2 February 2016 / Revised: 20 October 2016 / Accepted: 10 August 2017
+<br/>© Springer Science+Business Media, LLC 2017
</td></tr><tr><td>21ef129c063bad970b309a24a6a18cbcdfb3aff5</td><td>Individual and Inter-related Action Unit Detection in Videos for Affect Recognition
<br/>THÈSE NO 6837 (2016), ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE, by Anıl YÜCE
<br/>For the award of the degree of Docteur ès Sciences, accepted on the proposal of the jury:
<br/>Dr J.-M. Vesin, jury president; Prof. J.-Ph. Thiran, Prof. D. Sander, thesis directors;
<br/>Prof. M. F. Valstar, Prof. H. K. Ekenel, Dr S. Marcel, examiners
<br/>Presented on 19 February 2016 at the Faculté des Sciences et Techniques de l'Ingénieur,
<br/>Laboratoire de Traitement des Signaux 5, Doctoral Program in Electrical Engineering, Switzerland, 2016 </td></tr><tr><td>218b2c5c9d011eb4432be4728b54e39f366354c1</td><td>Enhancing Training Collections for Image
<br/>Annotation: An Instance-Weighted Mixture
<br/>Modeling Approach
@@ -3796,10 +4912,19 @@
<br/>
</td></tr><tr><td>4d2975445007405f8cdcd74b7fd1dd547066f9b8</td><td>Image and Video Processing
<br/>for Affective Applications
-</td></tr><tr><td>4df889b10a13021928007ef32dc3f38548e5ee56</td><td></td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td></td></tr><tr><td>4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41</td><td></td></tr><tr><td>4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+</td></tr><tr><td>4df889b10a13021928007ef32dc3f38548e5ee56</td><td></td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td></td></tr><tr><td>4db9e5f19366fe5d6a98ca43c1d113dac823a14d</td><td>Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers
+<br/>Are 1,000 Features Worth A Picture?
+<br/>Department of Computer Science and Center for Human-Computer Interaction
+<br/>Virginia Tech, Arlington, VA, USA
+</td></tr><tr><td>4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41</td><td></td></tr><tr><td>4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
<br/>2352
<br/>ICASSP 2017
-</td></tr><tr><td>4d90bab42806d082e3d8729067122a35bbc15e8d</td><td></td></tr><tr><td>4d0ef449de476631a8d107c8ec225628a67c87f9</td><td>© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE
+</td></tr><tr><td>4d90bab42806d082e3d8729067122a35bbc15e8d</td><td></td></tr><tr><td>4d6ad0c7b3cf74adb0507dc886993e603c863e8c</td><td>Human Activity Recognition Based on Wearable
+<br/>Sensor Data: A Standardization of the
+<br/>State-of-the-Art
+<br/>Smart Surveillance Interest Group, Computer Science Department
+<br/>Universidade Federal de Minas Gerais, Brazil
+</td></tr><tr><td>4d0ef449de476631a8d107c8ec225628a67c87f9</td><td>© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE
<br/>must be obtained for all other uses, in any current or future media, including
<br/>reprinting/republishing this material for advertising or promotional purposes,
<br/>creating new collective works, for resale or redistribution to servers or lists, or
@@ -3807,14 +4932,82 @@
<br/>Pre-print of article that appeared at BTAS 2010.
<br/>The published article can be accessed from:
<br/>http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5634517
+</td></tr><tr><td>4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f</td><td>Deep multi-frame face super-resolution
+<br/>Evgeniya Ustinova, Victor Lempitsky
+<br/>October 17, 2017
+</td></tr><tr><td>75879ab7a77318bbe506cb9df309d99205862f6c</td><td>Analysis Of Emotion Recognition From Facial
+<br/>Expressions Using Spatial And Transform Domain
+<br/>Methods
+</td></tr><tr><td>7574f999d2325803f88c4915ba8f304cccc232d1</td><td>Transfer Learning For Cross-Dataset Recognition: A Survey
+<br/>This paper summarises and analyses cross-dataset transfer learning techniques for recognition, with an
+<br/>emphasis on which kinds of methods can be used when the available source and target data are presented
+<br/>in different forms for boosting the target task. For the first time, this paper summarises several transfer
+<br/>criteria in detail at the concept level; these criteria are the key bases that guide what kind of knowledge
+<br/>to transfer between datasets. In addition, a taxonomy of cross-dataset scenarios and problems is proposed
+<br/>according to the properties of the data that define how different datasets diverge, and recent advances on
+<br/>each specific problem under different scenarios are then reviewed. Moreover, some real-world applications
+<br/>and commonly used benchmarks for cross-dataset recognition are reviewed. Lastly, several future directions
+<br/>are identified.
+<br/>Additional Key Words and Phrases: Cross-dataset, transfer learning, domain adaptation
+<br/>1. INTRODUCTION
+<br/>How humans transfer learning from one context to a similar one has been explored in the fields of
+<br/>Psychology and Education [Woodworth and Thorndike 1901; Perkins et al. 1992]. For example, learning to
+<br/>drive a car helps a person later learn more quickly to drive a truck, and learning mathematics prepares
+<br/>students to study physics. Machine learning algorithms are largely inspired by human brains. However,
+<br/>most of them require a huge number of training examples to learn a new model from scratch and fail to
+<br/>apply knowledge learned from previous domains or tasks. This may be because a basic assumption of
+<br/>statistical learning theory is that the training and test data are drawn from the same distribution and
+<br/>belong to the same task. Intuitively, learning from scratch is neither realistic nor practical, because it is
+<br/>not how humans learn. In addition, manually labelling a large amount of data for a new domain or task is
+<br/>labour intensive, especially for modern “data-hungry” and “data-driven” learning techniques (i.e. deep
+<br/>learning). However, the big data era provides a huge amount of data collected for other domains and
+<br/>tasks. Hence, smartly reusing previously available data for a current task with scarce data will benefit
+<br/>real-world applications.
+<br/>To reuse previous knowledge for current tasks, the differences between old and new data need to be
+<br/>taken into account. Take object recognition as an example. As claimed by Torralba and Efros [2011],
+<br/>despite the great efforts of object dataset creators, datasets appear to have strong built-in bias caused by
+<br/>various factors, such as selection bias, capture bias, category or label bias, and negative set bias. This
+<br/>suggests that no matter how big a dataset is, it cannot cover the complexity of the real visual world.
+<br/>Hence, dataset bias needs to be considered before reusing data from previous datasets. Pan and Yang
+<br/>[2010] summarise that the differences between datasets can be caused by domain divergence (i.e.
+<br/>distribution shift or feature space difference) or task divergence (i.e. conditional distribution shift or
+<br/>label space difference), or both. For example, in visual recognition, the distributions of previous and
+<br/>current data can diverge due to different environments, lighting, backgrounds, sensor types, resolutions,
+<br/>view angles, and post-processing. These external factors may cause distribution divergence or even
+<br/>feature space divergence between domains. On the other hand, task divergence between current and
+<br/>previous data is also ubiquitous. For example, it is highly possible that an animal species we want to
+<br/>recognize has not been seen
</td></tr><tr><td>75e9a141b85d902224f849ea61ab135ae98e7bfb</td><td></td></tr><tr><td>75503aff70a61ff4810e85838a214be484a674ba</td><td>Improved Facial Expression Recognition via Uni-Hyperplane Classification
<br/>S.W. Chew∗, S. Lucey†, P. Lucey‡, S. Sridharan∗, and J.F. Cohn‡
</td></tr><tr><td>75cd81d2513b7e41ac971be08bbb25c63c37029a</td><td></td></tr><tr><td>75e5ba7621935b57b2be7bf4a10cad66a9c445b9</td><td></td></tr><tr><td>75859ac30f5444f0d9acfeff618444ae280d661d</td><td>Multibiometric Cryptosystems based on Feature
<br/>Level Fusion
+</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>IEEE TRANSACTIONS ON AFFECTIVE COMPUTING
+<br/>AffectNet: A Database for Facial Expression,
+<br/>Valence, and Arousal Computing in the Wild
+</td></tr><tr><td>754f7f3e9a44506b814bf9dc06e44fecde599878</td><td>Quantized Densely Connected U-Nets for
+<br/>Efficient Landmark Localization
+</td></tr><tr><td>75249ebb85b74e8932496272f38af274fbcfd696</td><td>Face Identification in Large Galleries
+<br/>Smart Surveillance Interest Group, Department of Computer Science
+<br/>Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+</td></tr><tr><td>81a142c751bf0b23315fb6717bc467aa4fdfbc92</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1767
+<br/>ICASSP 2017
</td></tr><tr><td>8147ee02ec5ff3a585dddcd000974896cb2edc53</td><td>Angular Embedding:
<br/>A Robust Quadratic Criterion
<br/>Stella X. Yu, Member,
<br/>IEEE
+</td></tr><tr><td>8199803f476c12c7f6c0124d55d156b5d91314b6</td><td>The iNaturalist Species Classification and Detection Dataset
+<br/>1Caltech
+<br/>2Google
+<br/>3Cornell Tech
+<br/>4iNaturalist
</td></tr><tr><td>81831ed8e5b304e9d28d2d8524d952b12b4cbf55</td><td></td></tr><tr><td>81b2a541d6c42679e946a5281b4b9dc603bc171c</td><td>Universität Ulm | 89069 Ulm | Germany
<br/>Fakultät für Ingenieurwissenschaften und Informatik
<br/>Institut für Neuroinformatik
@@ -4084,15 +5277,17 @@
</td></tr><tr><td>86b105c3619a433b6f9632adcf9b253ff98aee87</td><td>1­4244­0367­7/06/$20.00 ©2006 IEEE
<br/>1013
<br/>ICME 2006
-</td></tr><tr><td>86b51bd0c80eecd6acce9fc538f284b2ded5bcdd</td><td></td></tr><tr><td>8699268ee81a7472a0807c1d3b1db0d0ab05f40d</td><td></td></tr><tr><td>72a00953f3f60a792de019a948174bf680cd6c9f</td><td>Stat Comput (2007) 17:57–70
+</td></tr><tr><td>86b51bd0c80eecd6acce9fc538f284b2ded5bcdd</td><td></td></tr><tr><td>8699268ee81a7472a0807c1d3b1db0d0ab05f40d</td><td></td></tr><tr><td>869583b700ecf33a9987447aee9444abfe23f343</td><td></td></tr><tr><td>72a00953f3f60a792de019a948174bf680cd6c9f</td><td>Stat Comput (2007) 17:57–70
<br/>DOI 10.1007/s11222-006-9004-9
<br/>Understanding the role of facial asymmetry in human face
<br/>identification
<br/>Received: May 2005 / Accepted: September 2006 / Published online: 30 January 2007
<br/>© Springer Science + Business Media, LLC 2007
-</td></tr><tr><td>726b8aba2095eef076922351e9d3a724bb71cb51</td><td></td></tr><tr><td>72ecaff8b57023f9fbf8b5b2588f3c7019010ca7</td><td>Facial Keypoints Detection
+</td></tr><tr><td>726b8aba2095eef076922351e9d3a724bb71cb51</td><td></td></tr><tr><td>721b109970bf5f1862767a1bec3f9a79e815f79a</td><td></td></tr><tr><td>72ecaff8b57023f9fbf8b5b2588f3c7019010ca7</td><td>Facial Keypoints Detection
+</td></tr><tr><td>72591a75469321074b072daff80477d8911c3af3</td><td>Group Component Analysis for Multi-block Data:
+<br/>Common and Individual Feature Extraction
</td></tr><tr><td>729dbe38538fbf2664bc79847601f00593474b05</td><td></td></tr><tr><td>729a9d35bc291cc7117b924219bef89a864ce62c</td><td>Recognizing Material Properties from Images
-</td></tr><tr><td>72c0c8deb9ea6f59fde4f5043bff67366b86bd66</td><td>Age progression in Human Faces : A Survey
+</td></tr><tr><td>721d9c387ed382988fce6fa864446fed5fb23173</td><td></td></tr><tr><td>72c0c8deb9ea6f59fde4f5043bff67366b86bd66</td><td>Age progression in Human Faces : A Survey
</td></tr><tr><td>445461a34adc4bcdccac2e3c374f5921c93750f8</td><td>Emotional Expression Classification using Time-Series Kernels∗
</td></tr><tr><td>4414a328466db1e8ab9651bf4e0f9f1fe1a163e4</td><td>1164
<br/>© EURASIP, 2010 ISSN 2076-1465
@@ -4110,6 +5305,10 @@
<br/>Eikeo
<br/>11 rue Leon Jouhaux,
<br/>F-75010, Paris, France
+</td></tr><tr><td>44b1399e8569a29eed0d22d88767b1891dbcf987</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Learning Multi-modal Latent Attributes
+</td></tr><tr><td>446dc1413e1cfaee0030dc74a3cee49a47386355</td><td>Recent Advances in Zero-shot Recognition
</td></tr><tr><td>44a3ec27f92c344a15deb8e5dc3a5b3797505c06</td><td>A Taxonomy of Part and Attribute Discovery
<br/>Techniques
</td></tr><tr><td>44aeda8493ad0d44ca1304756cc0126a2720f07b</td><td>Face Alive Icons
@@ -4135,10 +5334,46 @@
<br/>Unknown Institution 2
<br/>Anonymous Author 3
<br/>Unknown Institution 3
-</td></tr><tr><td>2aaa6969c03f435b3ea8431574a91a0843bd320b</td><td></td></tr><tr><td>2ad7cef781f98fd66101fa4a78e012369d064830</td><td></td></tr><tr><td>2ad29b2921aba7738c51d9025b342a0ec770c6ea</td><td></td></tr><tr><td>2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924</td><td></td></tr><tr><td>2ae139b247057c02cda352f6661f46f7feb38e45</td><td>Combining Modality Specific Deep Neural Networks for
+</td></tr><tr><td>2aaa6969c03f435b3ea8431574a91a0843bd320b</td><td></td></tr><tr><td>2ad7cef781f98fd66101fa4a78e012369d064830</td><td></td></tr><tr><td>2ad29b2921aba7738c51d9025b342a0ec770c6ea</td><td></td></tr><tr><td>2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924</td><td></td></tr><tr><td>2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c</td><td>Robust Registration and Geometry Estimation from Unstructured
+<br/>Facial Scans
+</td></tr><tr><td>2ae139b247057c02cda352f6661f46f7feb38e45</td><td>Combining Modality Specific Deep Neural Networks for
<br/>Emotion Recognition in Video
<br/>1École Polytechique de Montréal, Université de Montréal, Montréal, Canada
<br/>2Laboratoire d’Informatique des Systèmes Adaptatifs, Université de Montréal, Montréal, Canada
+</td></tr><tr><td>2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83</td><td>121
+<br/>The Effect of Computer-Generated Descriptions
+<br/>on Photo-Sharing Experiences of People With
+<br/>Visual Impairments
+<br/>Like sighted people, visually impaired people want to share photographs on social networking services, but
+<br/>find it difficult to identify and select photos from their albums. We aimed to address this problem by
+<br/>incorporating state-of-the-art computer-generated descriptions into Facebook’s photo-sharing feature. We
+<br/>interviewed 12 visually impaired participants to understand their photo-sharing experiences and designed a
+<br/>photo description feature for the Facebook mobile application. We evaluated this feature with six
+<br/>participants in a seven-day diary study. We found that participants used the descriptions to recall and
+<br/>organize their photos, but they hesitated to upload photos without a sighted person’s input. In addition to
+<br/>basic information about photo content, participants wanted to know more details about salient objects and
+<br/>people, and whether the photos reflected their personal aesthetic. We discuss these findings from the lens of
+<br/>self-disclosure and self-presentation theories and propose new computer vision research directions that will
+<br/>better support visual content sharing by visually impaired people.
+<br/>CCS Concepts: • Information interfaces and presentations → Multimedia and information systems; •
+<br/>Social and professional topics → People with disabilities
+<br/>KEYWORDS
+<br/>Visual impairments; computer-generated descriptions; SNSs; photo sharing; self-disclosure; self-presentation
+<br/>ACM Reference format:
+<br/>The Effect of Computer-Generated Descriptions On Photo-Sharing Experiences of People With Visual
+<br/>Impairments. Proc. ACM Hum.-Comput. Interact. 1, CSCW. 121 (November 2017), 22 pages.
+<br/>DOI: 10.1145/3134756
+<br/>1 INTRODUCTION
+<br/>Sharing memories and experiences via photos is a common way to engage with others on social networking
+<br/>services (SNSs) [39,46,51]. For instance, Facebook users uploaded more than 350 million photos a day [24]
+<br/>and Twitter, which initially supported only text in tweets, now has more than 28.4% of tweets containing
+<br/>images [39]. Visually impaired people (both blind and low vision) have a strong presence on SNS and are
+<br/>interested in sharing photos [50]. They take photos for the same reasons that sighted people do: sharing
+<br/>daily moments with their sighted friends and family [30,32]. A prior study showed that visually impaired
+<br/>people shared a relatively large number of photos on Facebook—only slightly less than their sighted
+<br/>counterparts [50].
</td></tr><tr><td>2a02355c1155f2d2e0cf7a8e197e0d0075437b19</td><td></td></tr><tr><td>2aea27352406a2066ddae5fad6f3f13afdc90be9</td><td></td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>Acquiring Linear Subspaces for Face
<br/>Recognition under Variable Lighting
<br/>David Kriegman, Senior Member, IEEE
@@ -4165,7 +5400,9 @@
</td></tr><tr><td>2f16459e2e24dc91b3b4cac7c6294387d4a0eacf</td><td></td></tr><tr><td>2f59f28a1ca3130d413e8e8b59fb30d50ac020e2</td><td>Children Gender Recognition Under Unconstrained
<br/>Conditions Based on Contextual Information
<br/>Joint Research Centre, European Commission, Ispra, Italy
-</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>Names and Faces in the News
+</td></tr><tr><td>2f88d3189723669f957d83ad542ac5c2341c37a5</td><td>Attribute-correlated local regions for deep relative attributes learning
+<br/>Fen Zhang, Xiangwei Kong, Ze Jia
+<br/>J. Electron. Imaging 27(4), 043021 (2018), doi: 10.1117/1.JEI.27.4.043021 </td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>Names and Faces in the News
<br/>Computer Science Division
<br/>U.C. Berkeley
<br/>Berkeley, CA 94720
@@ -4185,7 +5422,7 @@
<br/>Convolutional Neural Network
<br/>Vogt-Kölln-Straße 30, 22527 Hamburg, Germany
<br/>http://www.informatik.uni-hamburg.de/WTM/
-</td></tr><tr><td>2faa09413162b0a7629db93fbb27eda5aeac54ca</td><td>NISTIR 7674
+</td></tr><tr><td>2fea258320c50f36408032c05c54ba455d575809</td><td></td></tr><tr><td>2faa09413162b0a7629db93fbb27eda5aeac54ca</td><td>NISTIR 7674
<br/>Quantifying How Lighting and Focus
<br/>Affect Face Recognition Performance
<br/>Phillips, P. J.
@@ -4229,7 +5466,7 @@
<br/>Anand, INDIA
<br/>Anand, INDIA
<br/>Anand, INDIA
-</td></tr><tr><td>43476cbf2a109f8381b398e7a1ddd794b29a9a16</td><td>A Practical Transfer Learning Algorithm for Face Verification
+</td></tr><tr><td>43e268c118ac25f1f0e984b57bc54f0119ded520</td><td></td></tr><tr><td>43476cbf2a109f8381b398e7a1ddd794b29a9a16</td><td>A Practical Transfer Learning Algorithm for Face Verification
<br/>David Wipf
</td></tr><tr><td>4353d0dcaf450743e9eddd2aeedee4d01a1be78b</td><td>Learning Discriminative LBP-Histogram Bins
<br/>for Facial Expression Recognition
@@ -4250,6 +5487,9 @@
<br/>Chennai, India
<br/>IIT Madras
<br/>Chennai, India
+</td></tr><tr><td>43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a</td><td>Annales AI - Informatica, http://ai.annales.umcs.pl
</td></tr><tr><td>889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7</td><td>174
<br/>Using Support Vector Machines to Enhance the
<br/>Performance of Bayesian Face Recognition
@@ -4266,6 +5506,9 @@
</td></tr><tr><td>883006c0f76cf348a5f8339bfcb649a3e46e2690</td><td>Weakly Supervised Pain Localization using Multiple Instance Learning
</td></tr><tr><td>88f2952535df5859c8f60026f08b71976f8e19ec</td><td>A neural network framework for face
<br/>recognition by elastic bunch graph matching
+</td></tr><tr><td>8818b12aa0ff3bf0b20f9caa250395cbea0e8769</td><td>Fashion Conversation Data on Instagram
+<br/>∗Graduate School of Culture Technology, KAIST, South Korea
+<br/>†Department of Communication Studies, UCLA, USA
</td></tr><tr><td>8878871ec2763f912102eeaff4b5a2febfc22fbe</td><td>3781
<br/>Human Action Recognition in Unconstrained
<br/>Videos by Explicit Motion Modeling
@@ -4288,7 +5531,7 @@
<br/>Sarnoff Corporation
<br/>201 Washington Rd,
<br/>Princeton, NJ, 08540
-</td></tr><tr><td>6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9</td><td></td></tr><tr><td>6b089627a4ea24bff193611e68390d1a4c3b3644</td><td>CROSS-POLLINATION OF NORMALISATION
+</td></tr><tr><td>6b333b2c6311e36c2bde920ab5813f8cfcf2b67b</td><td></td></tr><tr><td>6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9</td><td></td></tr><tr><td>6b089627a4ea24bff193611e68390d1a4c3b3644</td><td>CROSS-POLLINATION OF NORMALISATION
<br/>TECHNIQUES FROM SPEAKER TO FACE
<br/>AUTHENTICATION USING GAUSSIAN
<br/>MIXTURE MODELS
@@ -4322,6 +5565,8 @@
<br/> OPEN ACCESS
<br/>Robust Face Recognition and Tagging in Visual Surveillance
<br/>System
+</td></tr><tr><td>0750a816858b601c0dbf4cfb68066ae7e788f05d</td><td>CosFace: Large Margin Cosine Loss for Deep Face Recognition
+<br/>Tencent AI Lab
</td></tr><tr><td>0716e1ad868f5f446b1c367721418ffadfcf0519</td><td>Interactively Guiding Semi-Supervised
<br/>Clustering via Attribute-Based Explanations
<br/>Virginia Tech, Blacksburg, VA, USA
@@ -4354,11 +5599,26 @@
<br/>Algorithm
<br/>M.Tech Scholar, Dept of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India.
<br/>Associate Professor, Department of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India
+</td></tr><tr><td>3803b91e784922a2dacd6a18f61b3100629df932</td><td>Temporal Multimodal Fusion
+<br/>for Video Emotion Classification in the Wild
+<br/>Orange Labs
+<br/>Cesson-Sévigné, France
+<br/>Orange Labs
+<br/>Cesson-Sévigné, France
+<br/>Normandie Univ., UNICAEN,
+<br/>ENSICAEN, CNRS
+<br/>Caen, France
+</td></tr><tr><td>38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Multi-distance Support Matrix Machine
+<br/>Received: date / Accepted: date
</td></tr><tr><td>385750bcf95036c808d63db0e0b14768463ff4c6</td><td></td></tr><tr><td>384f972c81c52fe36849600728865ea50a0c4670</td><td>1
<br/>Multi-Fold Gabor, PCA and ICA Filter
<br/>Convolution Descriptor for Face Recognition
<br/>
-</td></tr><tr><td>38861d0d3a0292c1f54153b303b0d791cbba1d50</td><td></td></tr><tr><td>38192a0f9261d9727b119e294a65f2e25f72d7e6</td><td></td></tr><tr><td>0077cd8f97cafd2b389783858a6e4ab7887b0b6b</td><td>MAI et al.: ON THE RECONSTRUCTION OF DEEP FACE TEMPLATES
+</td></tr><tr><td>380d5138cadccc9b5b91c707ba0a9220b0f39271</td><td>Deep Imbalanced Learning for Face Recognition
+<br/>and Attribute Prediction
+</td></tr><tr><td>38861d0d3a0292c1f54153b303b0d791cbba1d50</td><td></td></tr><tr><td>38192a0f9261d9727b119e294a65f2e25f72d7e6</td><td></td></tr><tr><td>00fb2836068042c19b5197d0999e8e93b920eb9c</td><td></td></tr><tr><td>0077cd8f97cafd2b389783858a6e4ab7887b0b6b</td><td>MAI et al.: ON THE RECONSTRUCTION OF DEEP FACE TEMPLATES
<br/>On the Reconstruction of Deep Face Templates
</td></tr><tr><td>00214fe1319113e6649435cae386019235474789</td><td>Bachelor's Thesis in Computer Science
<br/>Face Recognition using
@@ -4375,7 +5635,7 @@
<br/>Advisor: Prof. Dr. B. Leibe
<br/>September 2009
-</td></tr><tr><td>00f0ed04defec19b4843b5b16557d8d0ccc5bb42</td><td></td></tr><tr><td>0037bff7be6d463785d4e5b2671da664cd7ef746</td><td>Author manuscript, published in "European Conference on Computer Vision (ECCV '10) 6311 (2010) 634--647"
+</td></tr><tr><td>0004f72a00096fa410b179ad12aa3a0d10fc853c</td><td></td></tr><tr><td>00f0ed04defec19b4843b5b16557d8d0ccc5bb42</td><td></td></tr><tr><td>0037bff7be6d463785d4e5b2671da664cd7ef746</td><td>Author manuscript, published in "European Conference on Computer Vision (ECCV '10) 6311 (2010) 634--647"
<br/> DOI : 10.1007/978-3-642-15549-9_46
</td></tr><tr><td>00d9d88bb1bdca35663946a76d807fff3dc1c15f</td><td>Subjects and Their Objects: Localizing Interactees for a
<br/>Person-Centric View of Importance
@@ -4403,8 +5663,23 @@
<br/>Preserving Structure in Model-Free Tracking
</td></tr><tr><td>0059b3dfc7056f26de1eabaafd1ad542e34c2c2e</td><td></td></tr><tr><td>6e198f6cc4199e1c4173944e3df6f39a302cf787</td><td>MORPH-II: Inconsistencies and Cleaning Whitepaper
<br/>NSF-REU Site at UNC Wilmington, Summer 2017
-</td></tr><tr><td>6eaf446dec00536858548fe7cc66025b70ce20eb</td><td></td></tr><tr><td>6eba25166fe461dc388805cc2452d49f5d1cdadd</td><td>Pages 122.1-122.12
+</td></tr><tr><td>6eaf446dec00536858548fe7cc66025b70ce20eb</td><td></td></tr><tr><td>6e91be2ad74cf7c5969314b2327b513532b1be09</td><td>Dimensionality Reduction with Subspace Structure
+<br/>Preservation
+<br/>Department of Computer Science
+<br/>SUNY Buffalo
+<br/>Buffalo, NY 14260
+</td></tr><tr><td>6eba25166fe461dc388805cc2452d49f5d1cdadd</td><td>Pages 122.1-122.12
<br/>DOI: https://dx.doi.org/10.5244/C.30.122
+</td></tr><tr><td>6e8a81d452a91f5231443ac83e4c0a0db4579974</td><td>Illumination robust face representation based on intrinsic geometrical
+<br/>information
+<br/>Soyel, H; Ozmen, B; McOwan, PW
+<br/>This is a pre-copyedited, author-produced PDF of an article accepted for publication in IET
+<br/>Conference on Image Processing (IPR 2012). The version of record is available
+<br/>http://ieeexplore.ieee.org/document/6290632/?arnumber=6290632&tag=1
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/16147
</td></tr><tr><td>6ecd4025b7b5f4894c990614a9a65e3a1ac347b2</td><td>International Journal on Recent and Innovation Trends in Computing and Communication
<br/>ISSN: 2321-8169
@@ -4421,6 +5696,11 @@
<br/>Nasik, Maharashtra, India,
</td></tr><tr><td>6eaeac9ae2a1697fa0aa8e394edc64f32762f578</td><td></td></tr><tr><td>6ee2ea416382d659a0dddc7a88fc093accc2f8ee</td><td></td></tr><tr><td>6e3a181bf388dd503c83dc324561701b19d37df1</td><td>Finding a low-rank basis in a matrix subspace
<br/>Andr´e Uschmajew
+</td></tr><tr><td>6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f</td><td>Deep Episodic Memory: Encoding, Recalling, and Predicting
+<br/>Episodic Experiences for Robot Action Execution
+</td></tr><tr><td>6e911227e893d0eecb363015754824bf4366bdb7</td><td>Wasserstein Divergence for GANs
+<br/>1 Computer Vision Lab, ETH Zurich, Switzerland
+<br/>2 VISICS, KU Leuven, Belgium
</td></tr><tr><td>6ee8a94ccba10062172e5b31ee097c846821a822</td><td>Submitted 3/13; Revised 10/13; Published 12/13
<br/>How to Solve Classification and Regression Problems on
<br/>High-Dimensional Data with a Supervised
@@ -4558,6 +5838,14 @@
<br/>Using Local Directional Binary Pattern
<br/>Electrical Engineering Dept., Amirkabir University of Technology
<br/>Tehran, Iran
+</td></tr><tr><td>9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb</td><td>High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs
+<br/>1NVIDIA Corporation
+<br/>2UC Berkeley
+<br/>Figure 1: We propose a generative adversarial framework for synthesizing 2048 × 1024 images from semantic label maps
+<br/>(lower left corner in (a)). Compared to previous work [5], our results express more natural textures and details. (b) We can
+<br/>change labels in the original label map to create new scenes, like replacing trees with buildings. (c) Our framework also
+<br/>allows a user to edit the appearance of individual objects in the scene, e.g. changing the color of a car or the texture of a road.
+<br/>Please visit our website for more side-by-side comparisons as well as interactive editing demos.
</td></tr><tr><td>9a7858eda9b40b16002c6003b6db19828f94a6c6</td><td>MOONEY FACE CLASSIFICATION AND PREDICTION BY LEARNING ACROSS TONE
<br/>* UC Berkeley / †ICSI
</td></tr><tr><td>9a276c72acdb83660557489114a494b86a39f6ff</td><td>Emotion Classification through Lower Facial Expressions using Adaptive
@@ -4565,7 +5853,16 @@
<br/>Department of Information Technology, Faculty of Industrial Technology and Management,
</td></tr><tr><td>9a42c519f0aaa68debbe9df00b090ca446d25bc4</td><td>Face Recognition via Centralized Coordinate
<br/>Learning
-</td></tr><tr><td>36b40c75a3e53c633c4afb5a9309d10e12c292c7</td><td></td></tr><tr><td>365f67fe670bf55dc9ccdcd6888115264b2a2c56</td><td></td></tr><tr><td>36fe39ed69a5c7ff9650fd5f4fe950b5880760b0</td><td>Tracking von Gesichtsmimik
+</td></tr><tr><td>9aad8e52aff12bd822f0011e6ef85dfc22fe8466</td><td>Temporal-Spatial Mapping for Action Recognition
+</td></tr><tr><td>36b40c75a3e53c633c4afb5a9309d10e12c292c7</td><td></td></tr><tr><td>3646b42511a6a0df5470408bc9a7a69bb3c5d742</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Applications of Computers and Electronics for the Welfare of Rural Masses (ACEWRM) 2015
+<br/>Detection of Facial Parts based on ABLATA
+<br/>Technical Campus, Bhilai
+<br/>Vikas Singh
+<br/>Technical Campus, Bhilai
+<br/>Abha Choubey
+<br/>Technical Campus, Bhilai
+</td></tr><tr><td>365f67fe670bf55dc9ccdcd6888115264b2a2c56</td><td></td></tr><tr><td>36fe39ed69a5c7ff9650fd5f4fe950b5880760b0</td><td>Tracking von Gesichtsmimik
<br/>mit Hilfe von Gitterstrukturen
<br/>zur Klassifikation von schmerzrelevanten Action
<br/>Units [Tracking of facial expressions using lattice structures to classify pain-relevant Action Units]
@@ -4605,6 +5902,33 @@
<br/>network using constructive training algorithm
<br/>Received: 5 February 2014 / Revised: 22 August 2014 / Accepted: 13 October 2014
<br/>© Springer Science+Business Media New York 2014
+</td></tr><tr><td>3674f3597bbca3ce05e4423611d871d09882043b</td><td>ISSN 1796-2048
+<br/>Volume 7, Number 4, August 2012
+<br/>Contents
+<br/>Special Issue: Multimedia Contents Security in Social Networks Applications
+<br/>Guest Editors: Zhiyong Zhang and Muthucumaru Maheswaran
+<br/>Guest Editorial
+<br/>Zhiyong Zhang and Muthucumaru Maheswaran, p. 277
+<br/>SPECIAL ISSUE PAPERS
+<br/>DRTEMBB: Dynamic Recommendation Trust Evaluation Model Based on Bidding
+<br/>Gang Wang and Xiao-lin Gui, p. 279
+<br/>Block-Based Parallel Intra Prediction Scheme for HEVC
+<br/>Jie Jiang, Baolong, Wei Mo, and Kefeng Fan, p. 289
+<br/>Optimized LSB Matching Steganography Based on Fisher Information
+<br/>Yi-feng Sun, Dan-mei Niu, Guang-ming Tang, and Zhan-zhan Gao, p. 295
+<br/>A Novel Robust Zero-Watermarking Scheme Based on Discrete Wavelet Transform
+<br/>Yu Yang, Min Lei, Huaqun Liu, Yajian Zhou, and Qun Luo, p. 303
+<br/>Stego Key Estimation in LSB Steganography
+<br/>Jing Liu and Guangming Tang, p. 309
+<br/>REGULAR PAPERS
+<br/>Facial Expression Spacial Charts for Describing Dynamic Diversity of Facial Expressions, p. 314
</td></tr><tr><td>362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
<br/>2792
<br/>ICASSP 2016
@@ -4624,10 +5948,19 @@
<br/>ICIP 2013
</td></tr><tr><td>5c473cfda1d7c384724fbb139dfe8cb39f79f626</td><td></td></tr><tr><td>5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0</td><td>2132
<br/>Reference Face Graph for Face Recognition
+</td></tr><tr><td>5c35ac04260e281141b3aaa7bbb147032c887f0c</td><td>Face Detection and Tracking Control with Omni Car
+<br/>CS 231A Final Report
+<br/>June 31, 2016
</td></tr><tr><td>5c717afc5a9a8ccb1767d87b79851de8d3016294</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
<br/>1845
<br/>ICASSP 2012
-</td></tr><tr><td>0952ac6ce94c98049d518d29c18d136b1f04b0c0</td><td></td></tr><tr><td>09718bf335b926907ded5cb4c94784fd20e5ccd8</td><td>875
+</td></tr><tr><td>0952ac6ce94c98049d518d29c18d136b1f04b0c0</td><td></td></tr><tr><td>09137e3c267a3414314d1e7e4b0e3a4cae801f45</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Two Birds with One Stone: Transforming and Generating
+<br/>Facial Images with Iterative GAN
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>09926ed62511c340f4540b5bc53cf2480e8063f8</td><td>Action Tubelet Detector for Spatio-Temporal Action Localization
+</td></tr><tr><td>09718bf335b926907ded5cb4c94784fd20e5ccd8</td><td>875
<br/>Recognizing Partially Occluded, Expression Variant
<br/>Faces From Single Training Image per Person
<br/>With SOM and Soft k-NN Ensemble
@@ -4691,6 +6024,23 @@
<br/>An Empirical Study of Context in Object Detection
<br/>Anonymous CVPR submission
<br/>Paper ID 987
+</td></tr><tr><td>09df62fd17d3d833ea6b5a52a232fc052d4da3f5</td><td>ISSN: 1405-5546
+<br/>Instituto Politécnico Nacional
+<br/>México
+<br/>Rivas Araiza, Edgar A.; Mendiola Santibañez, Jorge D.; Herrera Ruiz, Gilberto; González Gutiérrez,
+<br/>Carlos A.; Trejo Perea, Mario; Ríos Moreno, G. J.
+<br/>Mejora de Contraste y Compensación en Cambios de la Iluminación [Contrast Enhancement and Compensation for Illumination Changes]
+<br/>Instituto Politécnico Nacional
+<br/>Distrito Federal, México
+<br/>Available at: http://www.redalyc.org/articulo.oa?id=61509703
</td></tr><tr><td>097104fc731a15fad07479f4f2c4be2e071054a2</td><td></td></tr><tr><td>09f853ce12f7361c4b50c494df7ce3b9fad1d221</td><td>myjournal manuscript No.
<br/>(will be inserted by the editor)
<br/>Random forests for real time 3D face analysis
@@ -4715,7 +6065,15 @@
<br/>Facial Emotions
<br/>School of Mechatronic Engineering, Universiti Malaysia Perlis, 02600, Ulu Pauh, Arau, Perlis, West Malaysia
</td></tr><tr><td>5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf</td><td>Robust Registration of Dynamic Facial Sequences
-</td></tr><tr><td>5dcf78de4d3d867d0fd4a3105f0defae2234b9cb</td><td></td></tr><tr><td>5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e</td><td>Face Recognition Algorithms
+</td></tr><tr><td>5dcf78de4d3d867d0fd4a3105f0defae2234b9cb</td><td></td></tr><tr><td>5db4fe0ce9e9227042144758cf6c4c2de2042435</td><td>INTERNATIONAL JOURNAL OF ELECTRICAL AND ELECTRONIC SYSTEMS RESEARCH, VOL.3, JUNE 2010
+<br/>Recognition of Facial Expression Using Haar
+<br/>Wavelet Transform
+</td></tr><tr><td>5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e</td><td>Face Recognition Algorithms
<br/>June 16, 2010
<br/>Ion Marqués
<br/>Supervisor:
@@ -4801,7 +6159,9 @@
<br/>filters with improved performance in terms of several competing
<br/>metrics, a search and optimization strategy is required to
<br/>automatically choose the set of training templates.
+</td></tr><tr><td>5d01283474b73a46d80745ad0cc0c4da14aae194</td><td></td></tr><tr><td>5d197c8cd34473eb6cde6b65ced1be82a3a1ed14</td><td><b>A Face Image Database for Evaluating Out-of-Focus Blur. Qi Han, Qiong Li and Xiamu Niu, Harbin Institute of Technology, China. 1. Introduction. Face recognition is one of the most popular research fields of computer vision and machine learning (Tores (2004); Zhao et al. (2003)). Along with investigation of face recognition algorithms and systems, many face image databases have been collected (Gross (2005)). Face databases are important for the advancement of the research field. Because of the nonrigidity and complex 3D structure of face, many factors influence the performance of face detection and recognition algorithms, such as pose, expression, age, brightness, contrast, noise, and blur. Some early face databases gathered under strictly controlled environment (Belhumeur et al. (1997); Samaria & Harter (1994); Turk & Pentland (1991)) only allow slight expression variation. To investigate the relationships between algorithms' performance and the above factors, more face databases with larger scale and various characters were built in the past years (Bailly-Bailliere et al. (2003); Flynn et al. (2003); Gao et al. (2008); Georghiades et al. (2001); Hallinan (1995); Phillips et al. (2000); Sim et al. (2003)). For instance, the "CAS-PEAL", "FERET", "CMU PIE", and "Yale B" databases include various poses (Gao et al. (2008); Georghiades et al. (2001); Phillips et al. (2000); Sim et al. (2003)); the "Harvard RL", "CMU PIE" and "Yale B" databases involve more than 40 different conditions in illumination (Georghiades et al. (2001); Hallinan (1995); Sim et al. (2003)); and the "BANCA" and "ND HID" databases contain over 10 times gathering (Bailly-Bailliere et al. (2003); Flynn et al. (2003)). These databases help researchers to evaluate and improve their algorithms for face detection, recognition, and other purposes. Blur is not the most important but still a notable factor affecting the performance of a biometric system (Fronthaler et al. (2006); Zamani et al. (2007)). The main causes of blur are out-of-focus of the camera and motion of the object, and out-of-focus blur is more significant in the application environment of face recognition (Eskicioglu & Fisher (1995); Kim et al. (1998); Tanaka et al. (2007); Yitzhaky & Kopeika (1996)). To investigate the influence of blur on a face recognition system, a face image database with different conditions of clarity and efficient blur evaluating algorithms are needed. This chapter introduces a new face database built for the purpose of blur evaluation. The application environments of face recognition are analyzed first, then an image gathering scheme is designed. Two typical gathering facilities are used and the focus status is divided into 11 steps. Further, the blur assessment algorithms are summarized and a comparison between them is carried out on the various-clarity database.</b></td></tr><tr><td>31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a</td><td></td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td></td></tr><tr><td>318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a</td><td>Sparsity in Dynamics of Spontaneous
+<br/>Subtle Emotions: Analysis & Application
+</td></tr><tr><td>31c0968fb5f587918f1c49bf7fa51453b3e89cf7</td><td>Deep Transfer Learning for Person Re-identification
</td></tr><tr><td>31e57fa83ac60c03d884774d2b515813493977b9</td><td></td></tr><tr><td>316e67550fbf0ba54f103b5924e6537712f06bee</td><td>Multimodal semi-supervised learning
<br/>for image classification
<br/>LEAR team, INRIA Grenoble, France
@@ -4827,7 +6187,11 @@
<br/>Publisher: Springer
<br/>http://link.springer.com/content/pdf/10.1007%2F978-3-642-04146-4_50.pdf
-</td></tr><tr><td>91883dabc11245e393786d85941fb99a6248c1fb</td><td></td></tr><tr><td>91b1a59b9e0e7f4db0828bf36654b84ba53b0557</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+</td></tr><tr><td>91883dabc11245e393786d85941fb99a6248c1fb</td><td></td></tr><tr><td>917bea27af1846b649e2bced624e8df1d9b79d6f</td><td>Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for
+<br/>Mobile and Embedded Applications
+<br/>Gyrfalcon Technology Inc.
+<br/>1900 McCarthy Blvd. Milpitas, CA 95035
+</td></tr><tr><td>91b1a59b9e0e7f4db0828bf36654b84ba53b0557</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
<br/>Simultaneous Hallucination and Recognition of
@@ -4835,10 +6199,17 @@
<br/>Decomposition (SVD) for performing both
+</td></tr><tr><td>911bef7465665d8b194b6b0370b2b2389dfda1a1</td><td>RANJAN, ROMERO, BLACK: LEARNING HUMAN OPTICAL FLOW
+<br/>Learning Human Optical Flow
+<br/>1 MPI for Intelligent Systems
+<br/>Tübingen, Germany
+<br/>2 Amazon Inc.
+</td></tr><tr><td>91ead35d1d2ff2ea7cf35d15b14996471404f68d</td><td>Combining and Steganography of 3D Face Textures
</td></tr><tr><td>919d0e681c4ef687bf0b89fe7c0615221e9a1d30</td><td></td></tr><tr><td>912a6a97af390d009773452814a401e258b77640</td><td></td></tr><tr><td>91d513af1f667f64c9afc55ea1f45b0be7ba08d4</td><td>Automatic Face Image Quality Prediction
</td></tr><tr><td>918b72a47b7f378bde0ba29c908babf6dab6f833</td><td></td></tr><tr><td>91e58c39608c6eb97b314b0c581ddaf7daac075e</td><td>Pixel-wise Ear Detection with Convolutional
<br/>Encoder-Decoder Networks
-</td></tr><tr><td>91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0</td><td></td></tr><tr><td>915d4a0fb523249ecbc88eb62cb150a60cf60fa0</td><td>Comparison of Feature Extraction Techniques in Automatic
+</td></tr><tr><td>91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0</td><td></td></tr><tr><td>9131c990fad219726eb38384976868b968ee9d9c</td><td>Deep Facial Expression Recognition: A Survey
+</td></tr><tr><td>915d4a0fb523249ecbc88eb62cb150a60cf60fa0</td><td>Comparison of Feature Extraction Techniques in Automatic
<br/>Face Recognition Systems for Security Applications
<br/>S. Cruz-Llanas, J. Ortega-Garcia, E. Martinez-Torrico, J. Gonzalez-Rodriguez
<br/>Dpto. Ingeniería Audiovisual y Comunicaciones, EUIT Telecomunicación, Univ. Politécnica de Madrid, Spain
@@ -4901,7 +6272,7 @@
<br/>for Visual Recognition
<br/>Doctoral Thesis
<br/>Stockholm, Sweden, 2017
-</td></tr><tr><td>65817963194702f059bae07eadbf6486f18f4a0a</td><td>http://dx.doi.org/10.1007/s11263-015-0814-0
+</td></tr><tr><td>656f05741c402ba43bb1b9a58bcc5f7ce2403d9a</td><td></td></tr><tr><td>65817963194702f059bae07eadbf6486f18f4a0a</td><td>http://dx.doi.org/10.1007/s11263-015-0814-0
<br/>WhittleSearch: Interactive Image Search with Relative Attribute
<br/>Feedback
<br/>Received: date / Accepted: date
@@ -4925,6 +6296,9 @@
<br/>Technische Universität München
<br/>KIT – Universität des Landes Baden-Württemberg und nationales Forschungszentrum in der Helmholtz-Gemeinschaft
<br/>www.kit.edu
+</td></tr><tr><td>65babb10e727382b31ca5479b452ee725917c739</td><td>Label Distribution Learning
+</td></tr><tr><td>62dccab9ab715f33761a5315746ed02e48eed2a0</td><td>A Short Note about Kinetics-600
+<br/>João Carreira
</td></tr><tr><td>62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4</td><td>Ding et al. EURASIP Journal on Image and Video Processing (2017) 2017:43
<br/>DOI 10.1186/s13640-017-0188-z
<br/>EURASIP Journal on Image
@@ -4939,7 +6313,77 @@
<br/>for Mathematics
<br/>Huerta-Pacheco1
<br/>*Corresponding author
-</td></tr><tr><td>6257a622ed6bd1b8759ae837b50580657e676192</td><td></td></tr><tr><td>620e1dbf88069408b008347cd563e16aeeebeb83</td><td></td></tr><tr><td>62a30f1b149843860938de6dd6d1874954de24b7</td><td>418
+</td></tr><tr><td>620339aef06aed07a78f9ed1a057a25433faa58b</td><td></td></tr><tr><td>62b3598b401c807288a113796f424612cc5833ca</td><td></td></tr><tr><td>628a3f027b7646f398c68a680add48c7969ab1d9</td><td>Plan for Final Year Project:
+<br/>HKU-Face: A Large Scale Dataset for Deep Face
+<br/>Recognition
+<br/>3035140108
+<br/>3035141841
+<br/>Introduction
+<br/>Face recognition is one of the most successful techniques in artificial intelligence,
+<br/>surpassing human-level performance in academic benchmarks and finding broad application in
+<br/>industry. GaussianFace [1] and FaceNet [2] hold state-of-the-art records using a statistical
+<br/>method and a deep-learning method, respectively. Moreover, face recognition has been applied
+<br/>in areas such as identity verification and record keeping, fostering a large number of start-ups such as
+<br/>Face++.
+<br/>Our final year project addresses the face recognition task by building a large-scale, carefully
+<br/>filtered dataset. This project plan specifies our roadmap and current research progress. It first
+<br/>illustrates the significance of constructing a large-scale face dataset for
+<br/>both academia and industry. Then the objectives and the related literature are
+<br/>reviewed in detail. Next, our methodology, the scope of the project, and the challenges we face are
+<br/>described. A detailed timeline for the project follows, together with a brief summary.
+<br/>2 Motivation
+<br/>Nowadays most face recognition tasks are supervised learning tasks that use datasets annotated
+<br/>by humans. This has two main drawbacks: (1) the dataset size is limited by the available
+<br/>human effort; (2) label accuracy suffers from human perceptual bias.
+<br/>Parkhi et al. [3] discuss the first problem, showing that large companies hold private face databases
+<br/>with far more data (see the comparison in Table 1). Other research institutions can only get
+<br/>access to public but smaller databases such as LFW [4, 5], which acts as a barrier to even higher
+<br/>performance.
+<br/>Dataset (availability; identities; images):
+<br/>IJB-A [6]: public, 500 identities, 5712 images
+<br/>LFW [4, 5]: public, 5K identities, 13K images
+<br/>YFD [7]: public, 1595 identities, 3425 videos
+<br/>CelebFaces [8]: public, 10K identities, 202K images
+<br/>CASIA-WebFace [9]: public, 10K identities, 500K images
+<br/>MS-Celeb-1M [10]: public, 100K identities, about 10M images
+<br/>Facebook: private, 4K identities, 4400K images
+<br/>Google: private, 8M identities, 100-200M images
+<br/>Table 1: Face recognition datasets
+</td></tr><tr><td>6257a622ed6bd1b8759ae837b50580657e676192</td><td></td></tr><tr><td>626859fe8cafd25da13b19d44d8d9eb6f0918647</td><td>Activity Recognition based on a
+<br/>Magnitude-Orientation Stream Network
+<br/>Smart Surveillance Interest Group, Department of Computer Science
+<br/>Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+</td></tr><tr><td>620e1dbf88069408b008347cd563e16aeeebeb83</td><td></td></tr><tr><td>62007c30f148334fb4d8975f80afe76e5aef8c7f</td><td>Eye In-Painting with Exemplar Generative Adversarial Networks
+<br/>Facebook Inc.
+<br/>1 Hacker Way, Menlo Park (CA), USA
+</td></tr><tr><td>62a30f1b149843860938de6dd6d1874954de24b7</td><td>418
<br/>Fast Algorithm for Updating the Discriminant Vectors
<br/>of Dual-Space LDA
</td></tr><tr><td>62e0380a86e92709fe2c64e6a71ed94d152c6643</td><td>Facial Emotion Recognition With Expression Energy
@@ -4976,6 +6420,12 @@
<br/>Still Images
</td></tr><tr><td>964a3196d44f0fefa7de3403849d22bbafa73886</td><td></td></tr><tr><td>9606b1c88b891d433927b1f841dce44b8d3af066</td><td>Principal Component Analysis with Tensor Train
<br/>Subspace
+</td></tr><tr><td>96b1000031c53cd4c1c154013bb722ffd87fa7da</td><td>ContextVP: Fully Context-Aware Video
+<br/>Prediction
+<br/>1 NVIDIA, Santa Clara, CA, USA
+<br/>2 ETH Zurich, Zurich, Switzerland
+<br/>3 The Swiss AI Lab IDSIA, Manno, Switzerland
+<br/>4 NNAISENSE, Lugano, Switzerland
</td></tr><tr><td>968f472477a8afbadb5d92ff1b9c7fdc89f0c009</td><td>Firefly-based Facial Expression Recognition
</td></tr><tr><td>9636c7d3643fc598dacb83d71f199f1d2cc34415</td><td></td></tr><tr><td>3a2fc58222870d8bed62442c00341e8c0a39ec87</td><td>Probabilistic Local Variation
<br/>Segmentation
@@ -4989,6 +6439,10 @@
</td></tr><tr><td>3a0a839012575ba455f2b84c2d043a35133285f9</td><td>444
<br/>Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 444–454,
<br/>Edinburgh, Scotland, UK, July 27–31, 2011. ©2011 Association for Computational Linguistics
+</td></tr><tr><td>3a9681e2e07be7b40b59c32a49a6ff4c40c962a2</td><td>Biometrics & Biostatistics International Journal
+<br/>Comparing treatment means: overlapping standard
+<br/>errors, overlapping confidence intervals, and tests of
+<br/>hypothesis
</td></tr><tr><td>3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e</td><td>for all other uses, in any current or future media,
@@ -5014,6 +6468,8 @@
<br/>Sricharan H S1, Srinidhi K S1, Rajath D N1, Tejas J N1, Chandrakala B M2
<br/> BE, DSCE, Bangalore1
<br/>Assistant Professor, DSCE, Bangalore2
+</td></tr><tr><td>54969bcd728b0f2d3285866c86ef0b4797c2a74d</td><td>IEEE TRANSACTION SUBMISSION
+<br/>Learning for Video Compression
</td></tr><tr><td>5456166e3bfe78a353df988897ec0bd66cee937f</td><td>Improved Boosting Performance by Exclusion
<br/>of Ambiguous Positive Examples
<br/>Computer Vision and Active Perception, KTH, Stockholm 10800, Sweden
@@ -5056,7 +6512,7 @@
<br/> M.Tech (CSE)
<br/> VKIT, Bangalore- 560040
<br/>BANGALORE, INDIA
-</td></tr><tr><td>5334ac0a6438483890d5eef64f6db93f44aacdf4</td><td></td></tr><tr><td>539ca9db570b5e43be0576bb250e1ba7a727d640</td><td></td></tr><tr><td>53c8cbc4a3a3752a74f79b74370ed8aeed97db85</td><td></td></tr><tr><td>5366573e96a1dadfcd4fd592f83017e378a0e185</td><td>Böhlen, Chandola and Salunkhe
+</td></tr><tr><td>5334ac0a6438483890d5eef64f6db93f44aacdf4</td><td></td></tr><tr><td>53dd25350d3b3aaf19beb2104f1e389e3442df61</td><td></td></tr><tr><td>530243b61fa5aea19b454b7dbcac9f463ed0460e</td><td></td></tr><tr><td>539ca9db570b5e43be0576bb250e1ba7a727d640</td><td></td></tr><tr><td>53c8cbc4a3a3752a74f79b74370ed8aeed97db85</td><td></td></tr><tr><td>5366573e96a1dadfcd4fd592f83017e378a0e185</td><td>Böhlen, Chandola and Salunkhe
<br/>Server, server in the cloud.
<br/>Who is the fairest in the crowd?
</td></tr><tr><td>533bfb82c54f261e6a2b7ed7d31a2fd679c56d18</td><td>Technical Report MSU-CSE-14-1
@@ -5084,9 +6540,17 @@
<br/>1 Center for Research in Computer Vision at UCF, Orlando, USA
<br/>2 Google Research, Mountain View, USA
<br/>http://crcv.ucf.edu/projects/DaMN/
+</td></tr><tr><td>3fb98e76ffd8ba79e1c22eda4d640da0c037e98a</td><td>Convolutional Neural Networks for Crop Yield Prediction using Satellite Images
+<br/>H. Russello
</td></tr><tr><td>3f5cf3771446da44d48f1d5ca2121c52975bb3d3</td><td></td></tr><tr><td>3f14b504c2b37a0e8119fbda0eff52efb2eb2461</td><td>5727
<br/>Joint Facial Action Unit Detection and Feature
<br/>Fusion: A Multi-Conditional Learning Approach
+</td></tr><tr><td>3f9a7d690db82cf5c3940fbb06b827ced59ec01e</td><td>VIP: Finding Important People in Images
+<br/>Virginia Tech
+<br/>Google Inc.
+<br/>Virginia Tech
+<br/>Project: https://computing.ece.vt.edu/~mclint/vip/
+<br/>Demo: http://cloudcv.org/vip/
</td></tr><tr><td>3fd90098551bf88c7509521adf1c0ba9b5dfeb57</td><td>
@@ -5154,6 +6618,7 @@
<br/>Ali Pazandeh
<br/>Sharif UTech
<br/>ESAT-KU Leuven, ETH Zurich
+</td></tr><tr><td>30870ef75aa57e41f54310283c0057451c8c822b</td><td>Overcoming Catastrophic Forgetting with Hard Attention to the Task
</td></tr><tr><td>303065c44cf847849d04da16b8b1d9a120cef73a</td><td></td></tr><tr><td>3046baea53360a8c5653f09f0a31581da384202e</td><td>Deformable Face Alignment via Local
<br/>Measurements and Global Constraints
</td></tr><tr><td>3028690d00bd95f20842d4aec84dc96de1db6e59</td><td>Leveraging Union of Subspace Structure to Improve Constrained Clustering
@@ -5169,15 +6634,22 @@
<br/>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
<br/>Unconstrained Face Recognition Using A Set-to-Set
<br/>Distance Measure
-</td></tr><tr><td>304a306d2a55ea41c2355bd9310e332fa76b3cb0</td><td></td></tr><tr><td>5e28673a930131b1ee50d11f69573c17db8fff3e</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+</td></tr><tr><td>304a306d2a55ea41c2355bd9310e332fa76b3cb0</td><td></td></tr><tr><td>5e7e055ef9ba6e8566a400a8b1c6d8f827099553</td><td></td></tr><tr><td>5e28673a930131b1ee50d11f69573c17db8fff3e</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
<br/>(2008)"
</td></tr><tr><td>5e6ba16cddd1797853d8898de52c1f1f44a73279</td><td>Face Identification with Second-Order Pooling
</td></tr><tr><td>5e821cb036010bef259046a96fe26e681f20266e</td><td></td></tr><tr><td>5bfc32d9457f43d2488583167af4f3175fdcdc03</td><td>International Journal of Science and Research (IJSR), India Online ISSN: 2319-7064
<br/>Local Gray Code Pattern (LGCP): A Robust
<br/>Feature Descriptor for Facial Expression
<br/>Recognition
+</td></tr><tr><td>5ba7882700718e996d576b58528f1838e5559225</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2016.2628787, IEEE
+<br/>Transactions on Affective Computing
+<br/>IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. X, NO. X, OCTOBER 2016
+<br/>Predicting Personalized Image Emotion
+<br/>Perceptions in Social Networks
</td></tr><tr><td>5bb684dfe64171b77df06ba68997fd1e8daffbe1</td><td></td></tr><tr><td>5bae9822d703c585a61575dced83fa2f4dea1c6d</td><td>MOTChallenge 2015:
<br/>Towards a Benchmark for Multi-Target Tracking
+</td></tr><tr><td>5babbad3daac5c26503088782fd5b62067b94fa5</td><td>Are You Sure You Want To Do That?
+<br/>Classification with Verification
</td></tr><tr><td>5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65</td><td>Evolving Systems. manuscript No.
<br/>(will be inserted by the editor)
<br/>An evolving spatio-temporal approach for gender and age
@@ -5190,7 +6662,7 @@
<br/>IIIT-Delhi, New Delhi, India
<br/>Article history:
<br/>Received 29 March 2017
-</td></tr><tr><td>5be3cc1650c918da1c38690812f74573e66b1d32</td><td>Relative Parts: Distinctive Parts for Learning Relative Attributes
+</td></tr><tr><td>5b2cfee6e81ef36507ebf3c305e84e9e0473575a</td><td></td></tr><tr><td>5be3cc1650c918da1c38690812f74573e66b1d32</td><td>Relative Parts: Distinctive Parts for Learning Relative Attributes
<br/>Center for Visual Information Technology, IIIT Hyderabad, India - 500032
</td></tr><tr><td>5b0ebb8430a04d9259b321fc3c1cc1090b8e600e</td><td></td></tr><tr><td>3765c26362ad1095dfe6744c6d52494ea106a42c</td><td></td></tr><tr><td>3727ac3d50e31a394b200029b2c350073c1b69e3</td><td></td></tr><tr><td>37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e</td><td>WACV
<br/>#394
@@ -5310,6 +6782,16 @@
<br/>sagepub.co.uk/journalsPermissions.nav
<br/>DOI: 10.1177/ToBeAssigned
<br/>www.sagepub.com/
+</td></tr><tr><td>08f4832507259ded9700de81f5fd462caf0d5be8</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 118 – No.14, May 2015
+<br/>Geometric Approach for Human Emotion
+<br/>Recognition using Facial Expression
+<br/>S. S. Bavkar
+<br/>Assistant Professor
+<br/>J. S. Rangole
+<br/>Assistant Professor
+<br/>V. U. Deshmukh
+<br/>Assistant Professor
</td></tr><tr><td>08d40ee6e1c0060d3b706b6b627e03d4b123377a</td><td>Human Action Localization
<br/>with Sparse Spatial Supervision
</td></tr><tr><td>08c1f8f0e69c0e2692a2d51040ef6364fb263a40</td><td></td></tr><tr><td>088aabe3da627432fdccf5077969e3f6402f0a80</td><td>Under review as a conference paper at ICLR 2018
@@ -5317,6 +6799,7 @@
<br/>OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
<br/>Anonymous authors
<br/>Paper under double-blind review
+</td></tr><tr><td>08903bf161a1e8dec29250a752ce9e2a508a711c</td><td>Joint Dimensionality Reduction and Metric Learning: A Geometric Take
</td></tr><tr><td>08e24f9df3d55364290d626b23f3d42b4772efb6</td><td>ENHANCING FACIAL EXPRESSION CLASSIFICATION BY INFORMATION
<br/>FUSION
<br/>I. Buciu1, Z. Hammal 2, A. Caplier2, N. Nikolaidis 1, and I. Pitas 1
@@ -5325,7 +6808,13 @@
<br/>web: http://www.aiia.csd.auth.gr
<br/>38031 Grenoble, France
<br/>web: http://www.lis.inpg.fr
-</td></tr><tr><td>0830c9b9f207007d5e07f5269ffba003235e4eff</td><td></td></tr><tr><td>081fb4e97d6bb357506d1b125153111b673cc128</td><td></td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>Understanding Kin Relationships in a Photo
+</td></tr><tr><td>0830c9b9f207007d5e07f5269ffba003235e4eff</td><td></td></tr><tr><td>081fb4e97d6bb357506d1b125153111b673cc128</td><td></td></tr><tr><td>0857281a3b6a5faba1405e2c11f4e17191d3824d</td><td>Chude-Olisah et al. EURASIP Journal on Advances in Signal Processing 2014, 2014:102
+<br/>http://asp.eurasipjournals.com/content/2014/1/102
+<br/>RESEARCH
+<br/>Face recognition via edge-based Gabor feature
+<br/>representation for plastic surgery-altered images
+<br/>Open Access
+</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>Understanding Kin Relationships in a Photo
</td></tr><tr><td>082ad50ac59fc694ba4369d0f9b87430553b11db</td><td></td></tr><tr><td>6dd052df6b0e89d394192f7f2af4a3e3b8f89875</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
<br/>ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
<br/>A literature survey on Facial Expression
@@ -5335,7 +6824,12 @@
<br/>vol. 7 (2014), pp. 25-40
<br/>A Survey on Newer Prospective
<br/>Biometric Authentication Modalities
-</td></tr><tr><td>6d10beb027fd7213dd4bccf2427e223662e20b7d</td><td></td></tr><tr><td>6de18708218988b0558f6c2f27050bb4659155e4</td><td></td></tr><tr><td>6d91da37627c05150cb40cac323ca12a91965759</td><td></td></tr><tr><td>6d66c98009018ac1512047e6bdfb525c35683b16</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 25, NO. 9, SEPTEMBER 2003
+</td></tr><tr><td>6d10beb027fd7213dd4bccf2427e223662e20b7d</td><td></td></tr><tr><td>6dddf1440617bf7acda40d4d75c7fb4bf9517dbb</td><td>JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, MM YY
+<br/>Beyond Counting: Comparisons of Density Maps for Crowd
+<br/>Analysis Tasks - Counting, Detection, and Tracking
+</td></tr><tr><td>6de18708218988b0558f6c2f27050bb4659155e4</td><td></td></tr><tr><td>6d91da37627c05150cb40cac323ca12a91965759</td><td></td></tr><tr><td>6d8c9a1759e7204eacb4eeb06567ad0ef4229f93</td><td>Face Alignment Robust to Pose, Expressions and
+<br/>Occlusions
+</td></tr><tr><td>6d66c98009018ac1512047e6bdfb525c35683b16</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 25, NO. 9, SEPTEMBER 2003
<br/>1063
<br/>Face Recognition Based on
<br/>Fitting a 3D Morphable Model
@@ -5389,10 +6883,17 @@
<br/>been used in the TRECVID video retrieval series.
<br/>We took the LSCOM CYC ontology dated 2006-06-30,
<br/>which contains 2832 unique categories. We removed
+</td></tr><tr><td>01c4cf9c7c08f0ad3f386d88725da564f3c54679</td><td>Interpretability Beyond Feature Attribution:
+<br/>Quantitative Testing with Concept Activation Vectors (TCAV)
</td></tr><tr><td>017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637</td><td>FACE RECOGNITION WITH HARMONIC DE-LIGHTING
<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
<br/>1Graduate School, CAS, Beijing, China, 100080
<br/>Emails: {lyqing, sgshan, wgao}@jdl.ac.cn
+</td></tr><tr><td>014e3d0fa5248e6f4634dc237e2398160294edce</td><td>Int J Comput Vis manuscript No.
+<br/>(will be inserted by the editor)
+<br/>What does 2D geometric information really tell us about
+<br/>3D face shape?
+<br/>Received: date / Accepted: date
</td></tr><tr><td>01beab8f8293a30cf48f52caea6ca0fb721c8489</td><td></td></tr><tr><td>0178929595f505ef7655272cc2c339d7ed0b9507</td><td></td></tr><tr><td>01b4b32c5ef945426b0396d32d2a12c69c282e29</td><td></td></tr><tr><td>0113b302a49de15a1d41ca4750191979ad756d2f</td><td>1-4244-0367-7/06/$20.00 ©2006 IEEE
<br/>537
<br/>ICME 2006
@@ -5533,6 +7034,8 @@
<br/>Face Recognition
</td></tr><tr><td>06262d14323f9e499b7c6e2a3dec76ad9877ba04</td><td>Real-Time Pose Estimation Piggybacked on Object Detection
<br/>Brno, Czech Republic
+</td></tr><tr><td>062c41dad67bb68fefd9ff0c5c4d296e796004dc</td><td>Temporal Generative Adversarial Nets with Singular Value Clipping
+<br/>Preferred Networks inc., Japan
</td></tr><tr><td>06400a24526dd9d131dfc1459fce5e5189b7baec</td><td>Event Recognition in Photo Collections with a Stopwatch HMM
<br/>1Computer Vision Lab
<br/>ETH Zürich, Switzerland
@@ -5560,6 +7063,9 @@
<br/>Activity Analysis
</td></tr><tr><td>06ad99f19cf9cb4a40741a789e4acbf4433c19ae</td><td>SenTion: A framework for Sensing Facial
<br/>Expressions
+</td></tr><tr><td>6c304f3b9c3a711a0cca5c62ce221fb098dccff0</td><td>Attentive Semantic Video Generation using Captions
+<br/>IIT Hyderabad
+<br/>IIT Hyderabad
</td></tr><tr><td>6c2b392b32b2fd0fe364b20c496fcf869eac0a98</td><td>DOI 10.1007/s00138-012-0423-7
<br/>ORIGINAL PAPER
<br/>Fully automatic face recognition framework based
@@ -5583,7 +7089,8 @@
<br/>by
<br/>David Lieh-Chiang Chen
<br/>2012
-</td></tr><tr><td>39ce143238ea1066edf0389d284208431b53b802</td><td></td></tr><tr><td>39ce2232452c0cd459e32a19c1abe2a2648d0c3f</td><td></td></tr><tr><td>3998c5aa6be58cce8cb65a64cb168864093a9a3e</td><td></td></tr><tr><td>397aeaea61ecdaa005b09198942381a7a11cd129</td><td></td></tr><tr><td>39b22bcbd452d5fea02a9ee63a56c16400af2b83</td><td></td></tr><tr><td>399a2c23bd2592ebe20aa35a8ea37d07c14199da</td><td></td></tr><tr><td>3986161c20c08fb4b9b791b57198b012519ea58b</td><td>International Journal of Soft Computing and Engineering (IJSCE)
+</td></tr><tr><td>39ce143238ea1066edf0389d284208431b53b802</td><td></td></tr><tr><td>39ce2232452c0cd459e32a19c1abe2a2648d0c3f</td><td></td></tr><tr><td>3998c5aa6be58cce8cb65a64cb168864093a9a3e</td><td></td></tr><tr><td>397aeaea61ecdaa005b09198942381a7a11cd129</td><td></td></tr><tr><td>39b22bcbd452d5fea02a9ee63a56c16400af2b83</td><td></td></tr><tr><td>399a2c23bd2592ebe20aa35a8ea37d07c14199da</td><td></td></tr><tr><td>39c8b34c1b678235b60b648d0b11d241a34c8e32</td><td>Learning to Deblur Images with Exemplars
+</td></tr><tr><td>3986161c20c08fb4b9b791b57198b012519ea58b</td><td>International Journal of Soft Computing and Engineering (IJSCE)
<br/>ISSN: 2231-2307, Volume-4 Issue-4, September 2014
<br/>An Efficient Method for Face Recognition based on
<br/>Fusion of Global and Local Feature Extraction
@@ -5593,9 +7100,27 @@
<br/>April 23, 2007
<br/>Tiny images
<br/>m a s s a c h u s e t t s i n s t i t u t e o f t e c h n o l o g y, c a m b r i d g e , m a 0 213 9 u s a — w w w. c s a i l . m i t . e d u
-</td></tr><tr><td>3958db5769c927cfc2a9e4d1ee33ecfba86fe054</td><td>Describable Visual Attributes for
+</td></tr><tr><td>3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1</td><td></td></tr><tr><td>3965d61c4f3b72044f43609c808f8760af8781a2</td><td></td></tr><tr><td>395bf182983e0917f33b9701e385290b64e22f9a</td><td></td></tr><tr><td>3933e323653ff27e68c3458d245b47e3e37f52fd</td><td>Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+<br/>Computational Biomedicine Lab
+<br/>4800 Calhoun Rd. Houston, TX, USA
+</td></tr><tr><td>39b452453bea9ce398613d8dd627984fd3a0d53c</td><td></td></tr><tr><td>3958db5769c927cfc2a9e4d1ee33ecfba86fe054</td><td>Describable Visual Attributes for
<br/>Face Verification and Image Search
-</td></tr><tr><td>39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df</td><td></td></tr><tr><td>9949ac42f39aeb7534b3478a21a31bc37fe2ffe3</td><td>Parametric Stereo for Multi-Pose Face Recognition and
+</td></tr><tr><td>39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df</td><td></td></tr><tr><td>994f7c469219ccce59c89badf93c0661aae34264</td><td>1
+<br/>Model Based Face Recognition Across Facial
+<br/>Expressions
+<br/>
+<br/>screens, embedded into mobiles and installed into everyday
+<br/>living and working environments they become valuable tools
+<br/>for human system interaction. A particular important aspect of
+<br/>this interaction is detection and recognition of faces and
+<br/>interpretation of facial expressions. These capabilities are
+<br/>deeply rooted in the human visual system and a crucial
+<br/>building block for social interaction. Consequently, these
+<br/>capabilities are an important step towards the acceptance of
+<br/>many technical systems.
+<br/>trees as a classifier
+<br/>lies not only
+</td></tr><tr><td>9949ac42f39aeb7534b3478a21a31bc37fe2ffe3</td><td>Parametric Stereo for Multi-Pose Face Recognition and
<br/>3D-Face Modeling
<br/>PSI ESAT-KUL
<br/>Leuven, Belgium
@@ -5606,6 +7131,14 @@
<br/>A Simple, Fast and Highly-Accurate Algorithm to
<br/>Recover 3D Shape from 2D Landmarks on a Single
<br/>Image
+</td></tr><tr><td>99c20eb5433ed27e70881d026d1dbe378a12b342</td><td>ISCA Archive
+<br/>http://www.isca-speech.org/archive
+<br/>First Workshop on Speech, Language
+<br/>and Audio in Multimedia
+<br/>Marseille, France
+<br/>August 22-23, 2013
+<br/>Proceedings of the First Workshop on Speech, Language and Audio in Multimedia (SLAM), Marseille, France, August 22-23, 2013.
+<br/>78
</td></tr><tr><td>9990e0b05f34b586ffccdc89de2f8b0e5d427067</td><td>International Journal of Modeling and Optimization, Vol. 3, No. 2, April 2013
<br/>Auto-Optimized Multimodal Expression Recognition
<br/>Framework Using 3D Kinect Data for ASD Therapeutic
@@ -5616,6 +7149,9 @@
+</td></tr><tr><td>99d7678039ad96ee29ab520ff114bb8021222a91</td><td>Political image analysis with deep neural
+<br/>networks
+<br/>November 28, 2017
</td></tr><tr><td>529e2ce6fb362bfce02d6d9a9e5de635bde81191</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
<br/>> TIP-05732-2009<
<br/>1
@@ -5625,13 +7161,42 @@
<br/>Travel Recommendation by Mining People
<br/>Attributes and Travel Group Types From
<br/>Community-Contributed Photos
-</td></tr><tr><td>521482c2089c62a59996425603d8264832998403</td><td></td></tr><tr><td>521b625eebea73b5deb171a350e3709a4910eebf</td><td></td></tr><tr><td>527dda77a3864d88b35e017d542cb612f275a4ec</td><td></td></tr><tr><td>52f23e1a386c87b0dab8bfdf9694c781cd0a3984</td><td></td></tr><tr><td>5239001571bc64de3e61be0be8985860f08d7e7e</td><td>SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, JUNE 2016
+</td></tr><tr><td>521482c2089c62a59996425603d8264832998403</td><td></td></tr><tr><td>521b625eebea73b5deb171a350e3709a4910eebf</td><td></td></tr><tr><td>527dda77a3864d88b35e017d542cb612f275a4ec</td><td></td></tr><tr><td>52f23e1a386c87b0dab8bfdf9694c781cd0a3984</td><td></td></tr><tr><td>529baf1a79cca813f8c9966ceaa9b3e42748c058</td><td>Triangle Wise Mapping Technique to Transform one Face Image into Another Face Image
+<br/>International Journal of Computer Applications
+<br/>© 2014 by IJCA Journal
+<br/>Volume 87 - Number 6
+<br/>Year of Publication: 2014
+<br/>Authors: Bhogeswar Borah
+<br/>DOI: 10.5120/15209-3714
+</td></tr><tr><td>5239001571bc64de3e61be0be8985860f08d7e7e</td><td>SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, JUNE 2016
<br/>Deep Appearance Models: A Deep Boltzmann
<br/>Machine Approach for Face Modeling
</td></tr><tr><td>550858b7f5efaca2ebed8f3969cb89017bdb739f</td><td></td></tr><tr><td>554b9478fd285f2317214396e0ccd81309963efd</td><td>Spatio-Temporal Action Localization For Human Action
<br/>Recognition in Large Dataset
<br/>1L2TI, Institut Galil´ee, Universit´e Paris 13, France;
<br/>2SERCOM, Ecole Polytechnique de Tunisie
+</td></tr><tr><td>55c68c1237166679d2cb65f266f496d1ecd4bec6</td><td>Learning to Score Figure Skating Sport Videos
</td></tr><tr><td>5502dfe47ac26e60e0fb25fc0f810cae6f5173c0</td><td>Affordance Prediction via Learned Object Attributes
</td></tr><tr><td>55a158f4e7c38fe281d06ae45eb456e05516af50</td><td>The 22nd International Conference on Computer Graphics and Vision
<br/>108
@@ -5640,6 +7205,37 @@
<br/>Recurrent Neural Network for Multimodal
<br/>Information Fusion
<br/>1 Xerox Research Centre India; 2 Amazon Development Center India
+</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation
+<br/>International Journal of Computer Applications
+<br/>Foundation of Computer Science (FCS), NY, USA
+<br/>Volume 126 - Number 5
+<br/>Year of Publication: 2015
+<br/>DOI: 10.5120/ijca2015906055
</td></tr><tr><td>973e3d9bc0879210c9fad145a902afca07370b86</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
<br/>Vol. 7, No. 7, 2016
<br/>From Emotion Recognition to Website
@@ -5654,6 +7250,11 @@
</td></tr><tr><td>97032b13f1371c8a813802ade7558e816d25c73f</td><td>Total Recall Final Report
<br/>Supervisor: Professor Duncan Gillies
<br/>January 11, 2006
+</td></tr><tr><td>97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5</td><td>manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Deep Affect Prediction in-the-wild: Aff-Wild Database and Challenge,
+<br/>Deep Architectures, and Beyond
+<br/>Zafeiriou4
</td></tr><tr><td>97d1d561362a8b6beb0fdbee28f3862fb48f1380</td><td>1955
<br/>Age Synthesis and Estimation via Faces:
<br/>A Survey
@@ -5664,7 +7265,19 @@
<br/>Classification in Standard
<br/>and Generalized
<br/>Dissimilarity Spaces
-</td></tr><tr><td>63d8d69e90e79806a062cb8654ad78327c8957bb</td><td></td></tr><tr><td>63eefc775bcd8ccad343433fc7a1dd8e1e5ee796</td><td></td></tr><tr><td>63340c00896d76f4b728dbef85674d7ea8d5ab26</td><td>1732
+</td></tr><tr><td>63d8d69e90e79806a062cb8654ad78327c8957bb</td><td></td></tr><tr><td>631483c15641c3652377f66c8380ff684f3e365c</td><td>Sync-DRAW: Automatic Video Generation using Deep Recurrent
+<br/>Attentive Architectures
+<br/>Gaurav Mittal∗
+<br/>IIT Hyderabad
+<br/>Vineeth N Balasubramanian
+<br/>IIT Hyderabad
+</td></tr><tr><td>63eefc775bcd8ccad343433fc7a1dd8e1e5ee796</td><td></td></tr><tr><td>632fa986bed53862d83918c2b71ab953fd70d6cc</td><td>GÜNEL ET AL.: WHAT FACE AND BODY SHAPES CAN TELL ABOUT HEIGHT
+<br/>What Face and Body Shapes Can Tell
+<br/>About Height
+<br/>CVLab
+<br/>EPFL,
+<br/>Lausanne, Switzerland
+</td></tr><tr><td>63340c00896d76f4b728dbef85674d7ea8d5ab26</td><td>1732
<br/>Discriminant Subspace Analysis:
<br/>A Fukunaga-Koontz Approach
</td></tr><tr><td>63d865c66faaba68018defee0daf201db8ca79ed</td><td>Deep Regression for Face Alignment
@@ -5719,6 +7332,8 @@
<br/>© Springer Science+Business Media Dordrecht 2015
</td></tr><tr><td>0f9bf5d8f9087fcba419379600b86ae9e9940013</td><td></td></tr><tr><td>0f92e9121e9c0addc35eedbbd25d0a1faf3ab529</td><td>MORPH-II: A Proposed Subsetting Scheme
<br/>NSF-REU Site at UNC Wilmington, Summer 2017
+</td></tr><tr><td>0fd1bffb171699a968c700f206665b2f8837d953</td><td>Weakly Supervised Object Localization with
+<br/>Multi-fold Multiple Instance Learning
</td></tr><tr><td>0a511058edae582e8327e8b9d469588c25152dc6</td><td></td></tr><tr><td>0a4f3a423a37588fde9a2db71f114b293fc09c50</td><td></td></tr><tr><td>0a3863a0915256082aee613ba6dab6ede962cdcd</td><td>Early and Reliable Event Detection Using Proximity Space Representation
<br/>LTCI, CNRS, Télécom ParisTech, Université Paris-Saclay, 75013, Paris, France
<br/>Jérôme Gauthier
@@ -5732,7 +7347,7 @@
<br/>The final version of record is available at
<br/> http://dx.doi.org/10.1109/TIP.2016.2539502
<br/>Discriminant Incoherent Component Analysis
-</td></tr><tr><td>0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7</td><td>Noname manuscript No.
+</td></tr><tr><td>0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a</td><td></td></tr><tr><td>0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7</td><td>Noname manuscript No.
<br/>(will be inserted by the editor)
<br/>Multi-task, multi-label and multi-domain learning with
<br/>residual convolutional networks for emotion recognition
@@ -5783,7 +7398,9 @@
<br/>Score-level Fusion for Face Recognition
<br/>1Department of Creative IT Engineering, POSTECH, Korea
<br/>2Department of Computer Science and Engineering, POSTECH, Korea
-</td></tr><tr><td>649eb674fc963ce25e4e8ce53ac7ee20500fb0e3</td><td></td></tr><tr><td>642c66df8d0085d97dc5179f735eed82abf110d0</td><td></td></tr><tr><td>641f34deb3bdd123c6b6e7b917519c3e56010cb7</td><td></td></tr><tr><td>6462ef39ca88f538405616239471a8ea17d76259</td><td></td></tr><tr><td>90cb074a19c5e7d92a1c0d328a1ade1295f4f311</td><td>MIT. Media Laboratory Affective Computing Technical Report #571
+</td></tr><tr><td>649eb674fc963ce25e4e8ce53ac7ee20500fb0e3</td><td></td></tr><tr><td>642c66df8d0085d97dc5179f735eed82abf110d0</td><td></td></tr><tr><td>641f34deb3bdd123c6b6e7b917519c3e56010cb7</td><td></td></tr><tr><td>645de797f936cb19c1b8dba3b862543645510544</td><td>Deep Temporal Linear Encoding Networks
+<br/>1ESAT-PSI, KU Leuven, 2CVL, ETH Zürich
+</td></tr><tr><td>6462ef39ca88f538405616239471a8ea17d76259</td><td></td></tr><tr><td>90ac0f32c0c29aa4545ed3d5070af17f195d015f</td><td></td></tr><tr><td>90cb074a19c5e7d92a1c0d328a1ade1295f4f311</td><td>MIT. Media Laboratory Affective Computing Technical Report #571
<br/>Appears in IEEE International Workshop on Analysis and Modeling of Faces and Gestures , Oct 2003
<br/>Fully Automatic Upper Facial Action Recognition
<br/>MIT Media Laboratory
@@ -5801,6 +7418,8 @@
<br/>information
<br/>Introduction
+</td></tr><tr><td>bf5940d57f97ed20c50278a81e901ae4656f0f2c</td><td>Query-free Clothing Retrieval via Implicit
+<br/>Relevance Feedback
</td></tr><tr><td>bfb98423941e51e3cd067cb085ebfa3087f3bfbe</td><td>Sparseness helps: Sparsity Augmented
<br/>Collaborative Representation for Classification
</td></tr><tr><td>d3b73e06d19da6b457924269bb208878160059da</td><td>Proceedings of the 5th International Conference on Computing and Informatics, ICOCI 2015
@@ -5815,7 +7434,19 @@
<br/>Learning Compact Feature Descriptor and Adaptive
<br/>Matching Framework for Face Recognition
<br/>improvements
-</td></tr><tr><td>d4c7d1a7a03adb2338704d2be7467495f2eb6c7b</td><td></td></tr><tr><td>d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d</td><td></td></tr><tr><td>d4b88be6ce77164f5eea1ed2b16b985c0670463a</td><td>TECHNICAL REPORT JAN.15.2016
+</td></tr><tr><td>d309e414f0d6e56e7ba45736d28ee58ae2bad478</td><td>Efficient Two-Stream Motion and Appearance 3D CNNs for
+<br/>Video Classification
+<br/>Ali Diba
+<br/>ESAT-KU Leuven
+<br/>Ali Pazandeh
+<br/>Sharif UTech
+<br/>Luc Van Gool
+<br/>ESAT-KU Leuven, ETH Zurich
+</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td></td></tr><tr><td>d33fcdaf2c0bd0100ec94b2c437dccdacec66476</td><td>Neurons with Paraboloid Decision Boundaries for
+<br/>Improved Neural Network Classification
+<br/>Performance
+</td></tr><tr><td>d444368421f456baf8c3cb089244e017f8d32c41</td><td>CNN for IMU Assisted Odometry Estimation using Velodyne LiDAR
+</td></tr><tr><td>d4c7d1a7a03adb2338704d2be7467495f2eb6c7b</td><td></td></tr><tr><td>d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d</td><td></td></tr><tr><td>d44a93027208816b9e871101693b05adab576d89</td><td></td></tr><tr><td>d4b88be6ce77164f5eea1ed2b16b985c0670463a</td><td>TECHNICAL REPORT JAN.15.2016
<br/>A Survey of Different 3D Face Reconstruction
<br/>Methods
<br/>Department of Computer Science and Engineering
@@ -5839,7 +7470,14 @@
<br/>Bogotá, Colombia
<br/>Bogotá, Colombia
<br/>Bogotá, Colombia
-</td></tr><tr><td>ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906</td><td>ELEKTROTEHNI ˇSKI VESTNIK 78(1-2): 12–17, 2011
+</td></tr><tr><td>badcd992266c6813063c153c41b87babc0ba36a3</td><td>Recent Advances in Object Detection in the Age
+<br/>of Deep Convolutional Neural Networks
+<br/>,1,2), Frédéric Jurie(1)
+<br/>(∗) equal contribution
+<br/>(1)Normandie Univ, UNICAEN, ENSICAEN, CNRS
+<br/>(2)Safran Electronics and Defense
+<br/>September 11, 2018
+</td></tr><tr><td>ba788365d70fa6c907b71a01d846532ba3110e31</td><td></td></tr><tr><td>ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906</td><td>ELEKTROTEHNIŠKI VESTNIK 78(1-2): 12–17, 2011
<br/>EXISTING SEPARATE ENGLISH EDITION
<br/>Uporaba emotivno pogojenega računalništva v
<br/>priporočilnih sistemih [The use of affective computing in recommender systems]
@@ -5919,11 +7557,51 @@
<br/>[…] recommender systems developed by Arapakis
<br/>[2] and Tkalčič [14], there is virtually no related work on
<br/>emotion-aware recommender systems. Pantić and […]
-</td></tr><tr><td>ba29ba8ec180690fca702ad5d516c3e43a7f0bb8</td><td></td></tr><tr><td>bab88235a30e179a6804f506004468aa8c28ce4f</td><td></td></tr><tr><td>a0f94e9400938cbd05c4b60b06d9ed58c3458303</td><td>1118
+</td></tr><tr><td>ba29ba8ec180690fca702ad5d516c3e43a7f0bb8</td><td></td></tr><tr><td>bab88235a30e179a6804f506004468aa8c28ce4f</td><td></td></tr><tr><td>badd371a49d2c4126df95120902a34f4bee01b00</td><td>GONDA, WEI, PARAG, PFISTER: PARALLEL SEPARABLE 3D CONVOLUTION
+<br/>Parallel Separable 3D Convolution for Video
+<br/>and Volumetric Data Understanding
+<br/>Harvard John A. Paulson School of
+<br/>Engineering and Applied Sciences
+<br/>Cambridge MA, USA
+<br/>Toufiq Parag
+<br/>Hanspeter Pfister
+</td></tr><tr><td>a0f94e9400938cbd05c4b60b06d9ed58c3458303</td><td>1118
<br/>Value-Directed Human Behavior Analysis
<br/>from Video Using Partially Observable
<br/>Markov Decision Processes
-</td></tr><tr><td>a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670</td><td></td></tr><tr><td>a0dfb8aae58bd757b801e2dcb717a094013bc178</td><td>Reconocimiento de expresiones faciales con base
+</td></tr><tr><td>a022eff5470c3446aca683eae9c18319fd2406d5</td><td>2017-ENST-0071
+<br/>EDITE - ED 130
+<br/>Doctorat ParisTech
+<br/>THÈSE
+<br/>to obtain the degree of Doctor awarded by
+<br/>TÉLÉCOM ParisTech
+<br/>Speciality "SIGNAL and IMAGES"
+<br/>presented and publicly defended on
+<br/>15 December 2017
+<br/>Apprentissage Profond pour la Description Sémantique des Traits
+<br/>Visuels Humains [Deep Learning for the Semantic Description of Human Visual Traits]
+<br/>Thesis director: Jean-Luc DUGELAY
+<br/>Thesis co-supervisor: Moez BACCOUCHE
+<br/>Jury
+<br/>Mme Bernadette DORIZZI, PRU, Télécom SudParis
+<br/>Mme Jenny BENOIS-PINEAU, PRU, Université de Bordeaux
+<br/>M. Christian WOLF, MC/HDR, INSA de Lyon
+<br/>M. Patrick PEREZ, Chercheur/HDR, Technicolor Rennes
+<br/>M. Moez BACCOUCHE, Chercheur/Docteur, Orange Labs Rennes
+<br/>M. Jean-Luc DUGELAY, PRU, Eurecom Sophia Antipolis
+<br/>M. Sid-Ahmed BERRANI, Directeur de l’Innovation/HDR, Algérie Télécom
+<br/>President of the jury
+<br/>Reviewer
+<br/>Reviewer
+<br/>Examiner
+<br/>Co-supervisor
+<br/>Thesis Director
+<br/>Invited member
+<br/>TÉLÉCOM ParisTech
+<br/>école de l’Institut Télécom - membre de ParisTech
+</td></tr><tr><td>a0c37f07710184597befaa7e6cf2f0893ff440e9</td><td></td></tr><tr><td>a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670</td><td></td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>Learning Deep Representation for Face
+<br/>Alignment with Auxiliary Attributes
+</td></tr><tr><td>a0dfb8aae58bd757b801e2dcb717a094013bc178</td><td>Reconocimiento de expresiones faciales con base
<br/>en la dinámica de puntos de referencia faciales [Facial expression recognition based on the dynamics of facial landmark points]
<br/>Instituto Nacional de Astrofísica Óptica y Electrónica,
<br/>División de Ciencias Computacionales, Tonantzintla, Puebla,
@@ -5959,7 +7637,9 @@
<br/>Merantix GmbH
<br/>D-ITET, ETH Zurich
<br/>ESAT, KU Leuven
-</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td></td></tr><tr><td>a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9</td><td>11th International Symposium of Robotics Research (ISRR2003), pp.192-201, 2003
+</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td></td></tr><tr><td>a7664247a37a89c74d0e1a1606a99119cffc41d4</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3287
+</td></tr><tr><td>a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9</td><td>11th International Symposium of Robotics Research (ISRR2003), pp.192-201, 2003
<br/>Face Recognition Using Multi-viewpoint Patterns for
<br/>Robot Vision
<br/>Corporate Research and Development Center, TOSHIBA Corporation
@@ -5968,15 +7648,29 @@
<br/>DD2427 Final Project Report
<br/>Human face attributes prediction with Deep
<br/>Learning
+</td></tr><tr><td>a775da3e6e6ea64bffab7f9baf665528644c7ed3</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 142 – No.9, May 2016
+<br/>Human Face Pose Estimation based on Feature
+<br/>Extraction Points
+<br/>Research scholar,
+<br/> Department of ECE
+<br/>SBSSTC, Moga Road,
+<br/> Ferozepur, Punjab, India
</td></tr><tr><td>b8dba0504d6b4b557d51a6cf4de5507141db60cf</td><td>Comparing Performances of Big Data Stream
<br/>Processing Platforms with RAM3S
-</td></tr><tr><td>b8378ab83bc165bc0e3692f2ce593dcc713df34a</td><td></td></tr><tr><td>b81cae2927598253da37954fb36a2549c5405cdb</td><td>Experiments on Visual Information Extraction with the Faces of Wikipedia
+</td></tr><tr><td>b8378ab83bc165bc0e3692f2ce593dcc713df34a</td><td></td></tr><tr><td>b8f3f6d8f188f65ca8ea2725b248397c7d1e662d</td><td>Selfie Detection by Synergy-Constriant Based
+<br/>Convolutional Neural Network
+<br/>Electrical and Electronics Engineering, NITK-Surathkal, India.
+</td></tr><tr><td>b81cae2927598253da37954fb36a2549c5405cdb</td><td>Experiments on Visual Information Extraction with the Faces of Wikipedia
<br/>Département de génie informatique et génie logiciel, Polytechnique Montréal
<br/>2500, Chemin de Polytechnique, Université de Montréal, Montréal, Québec, Canada
</td></tr><tr><td>b8a829b30381106b806066d40dd372045d49178d</td><td>1872
<br/>A Probabilistic Framework for Joint Pedestrian Head
<br/>and Body Orientation Estimation
-</td></tr><tr><td>b171f9e4245b52ff96790cf4f8d23e822c260780</td><td></td></tr><tr><td>b1a3b19700b8738b4510eecf78a35ff38406df22</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2731763, IEEE
+</td></tr><tr><td>b1d89015f9b16515735d4140c84b0bacbbef19ac</td><td>Too Far to See? Not Really!
+<br/>— Pedestrian Detection with Scale-aware
+<br/>Localization Policy
+</td></tr><tr><td>b14b672e09b5b2d984295dfafb05604492bfaec5</td><td>Learning Image Classification and Retrieval Models. Thomas Mensink </td></tr><tr><td>b171f9e4245b52ff96790cf4f8d23e822c260780</td><td></td></tr><tr><td>b1a3b19700b8738b4510eecf78a35ff38406df22</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2731763, IEEE
<br/>Transactions on Affective Computing
<br/>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
<br/>Automatic Analysis of Facial Actions: A Survey
@@ -5987,8 +7681,14 @@
</td></tr><tr><td>b19e83eda4a602abc5a8ef57467c5f47f493848d</td><td>JOURNAL OF LATEX CLASS FILES
<br/>Heat Kernel Based Local Binary Pattern for
<br/>Face Representation
+</td></tr><tr><td>dd8084b2878ca95d8f14bae73e1072922f0cc5da</td><td>Model Distillation with Knowledge Transfer from
+<br/>Face Classification to Alignment and Verification
+<br/>Beijing Orion Star Technology Co., Ltd. Beijing, China
</td></tr><tr><td>dd0760bda44d4e222c0a54d41681f97b3270122b</td><td></td></tr><tr><td>ddea3c352f5041fb34433b635399711a90fde0e8</td><td>Facial Expression Classification using Visual Cues and Language
<br/>Department of Computer Science and Engineering, IIT Kanpur
+</td></tr><tr><td>ddbd24a73ba3d74028596f393bb07a6b87a469c0</td><td>Multi-region two-stream R-CNN
+<br/>for action detection
+<br/>Inria
</td></tr><tr><td>ddf099f0e0631da4a6396a17829160301796151c</td><td>IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY
<br/>Learning Face Image Quality from
<br/>Human Assessments
@@ -6001,14 +7701,22 @@
</td></tr><tr><td>dd2f6a1ba3650075245a422319d86002e1e87808</td><td></td></tr><tr><td>dd8d53e67668067fd290eb500d7dfab5b6f730dd</td><td>69
<br/>A Parameter-Free Framework for General
<br/>Supervised Subspace Learning
+</td></tr><tr><td>ddbb6e0913ac127004be73e2d4097513a8f02d37</td><td>264
+<br/>IEEE TRANSACTIONS ON MULTIMEDIA, VOL. 1, NO. 3, SEPTEMBER 1999
+<br/>Face Detection Using Quantized Skin Color
+<br/>Regions Merging and Wavelet Packet Analysis
</td></tr><tr><td>dd600e7d6e4443ebe87ab864d62e2f4316431293</td><td></td></tr><tr><td>dcb44fc19c1949b1eda9abe998935d567498467d</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
<br/>1916
</td></tr><tr><td>dc77287bb1fcf64358767dc5b5a8a79ed9abaa53</td><td>Fashion Conversation Data on Instagram
<br/>∗Graduate School of Culture Technology, KAIST, South Korea
<br/>†Department of Communication Studies, UCLA, USA
-</td></tr><tr><td>dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb</td><td></td></tr><tr><td>b6c047ab10dd86b1443b088029ffe05d79bbe257</td><td></td></tr><tr><td>b6c53891dff24caa1f2e690552a1a5921554f994</td><td></td></tr><tr><td>b613b30a7cbe76700855479a8d25164fa7b6b9f1</td><td>1
+</td></tr><tr><td>dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb</td><td></td></tr><tr><td>dc974c31201b6da32f48ef81ae5a9042512705fe</td><td>Am I done? Predicting Action Progress in Video
+<br/>1 Media Integration and Communication Center, Univ. of Florence, Italy
+<br/>2 Department of Mathematics “Tullio Levi-Civita”, Univ. of Padova, Italy
+</td></tr><tr><td>b6c047ab10dd86b1443b088029ffe05d79bbe257</td><td></td></tr><tr><td>b6c53891dff24caa1f2e690552a1a5921554f994</td><td></td></tr><tr><td>b613b30a7cbe76700855479a8d25164fa7b6b9f1</td><td>1
<br/>Identifying User-Specific Facial Affects from
<br/>Spontaneous Expressions with Minimal Annotation
+</td></tr><tr><td>b6f682648418422e992e3ef78a6965773550d36b</td><td>February 8, 2017
</td></tr><tr><td>b656abc4d1e9c8dc699906b70d6fcd609fae8182</td><td></td></tr><tr><td>a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd</td><td>(IJARAI) International Journal of Advanced Research in Artificial Intelligence,
<br/>Vol. 5, No.6, 2016
<br/>A Model for Facial Emotion Inference Based on
@@ -6029,6 +7737,9 @@
<br/>São Paulo, Brazil
<br/>São Paulo, Brazil
<br/>São Paulo, Brazil
+</td></tr><tr><td>a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f</td><td></td></tr><tr><td>a98316980b126f90514f33214dde51813693fe0d</td><td>Collaborations on YouTube: From Unsupervised Detection to the
+<br/>Impact on Video and Channel Popularity
+<br/>Multimedia Communications Lab (KOM), Technische Universität Darmstadt, Germany
</td></tr><tr><td>a93781e6db8c03668f277676d901905ef44ae49f</td><td>Recent Datasets on Object Manipulation: A Survey
</td></tr><tr><td>a9adb6dcccab2d45828e11a6f152530ba8066de6</td><td>Aydınlanma Alt-uzaylarına dayalı Gürbüz Yüz Tanıma
<br/>Illumination Subspaces based Robust Face Recognition
@@ -6066,9 +7777,16 @@
<br/>ölçüde arttırdığını göstermiştir.
<br/>değişimleri,
<br/>farklı
+</td></tr><tr><td>a95dc0c4a9d882a903ce8c70e80399f38d2dcc89</td><td> TR-IIS-14-003
+<br/>Review and Implementation of
+<br/>High-Dimensional Local Binary
+<br/>Patterns and Its Application to
+<br/>Face Recognition
+<br/>July 24, 2014 || Technical Report No. TR-IIS-14-003
+<br/>http://www.iis.sinica.edu.tw/page/library/TechReport/tr2014/tr14.html
</td></tr><tr><td>a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6</td><td>Learning to Succeed while Teaching to Fail:
<br/>Privacy in Closed Machine Learning Systems
-</td></tr><tr><td>d50c6d22449cc9170ab868b42f8c72f8d31f9b6c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+</td></tr><tr><td>a92b5234b8b73e06709dd48ec5f0ec357c1aabed</td><td></td></tr><tr><td>d50c6d22449cc9170ab868b42f8c72f8d31f9b6c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
<br/>1668
</td></tr><tr><td>d522c162bd03e935b1417f2e564d1357e98826d2</td><td>He et al. EURASIP Journal on Advances in Signal Processing 2013, 2013:19
<br/>http://asp.eurasipjournals.com/content/2013/1/19
@@ -6141,6 +7859,11 @@
<br/>in
<br/>illumination based
<br/>is developed with the objective to
+</td></tr><tr><td>d5444f9475253bbcfef85c351ea9dab56793b9ea</td><td>IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS
+<br/>BoxCars: Improving Fine-Grained Recognition
+<br/>of Vehicles using 3D Bounding Boxes
+<br/>in Traffic Surveillance
+<br/>in contrast
</td></tr><tr><td>d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e</td><td>World Journal of Computer Application and Technology 2(7): 133-138, 2014
<br/>DOI: 10.13189/wjcat.2014.020701
<br/>http://www.hrpub.org
@@ -6157,7 +7880,13 @@
<br/>Face Synthesis from Visual Attributes via Sketch using
<br/>Conditional VAEs and GANs
<br/>Received: date / Accepted: date
-</td></tr><tr><td>d5e1173dcb2a51b483f86694889b015d55094634</td><td></td></tr><tr><td>d24dafe10ec43ac8fb98715b0e0bd8e479985260</td><td>J Nonverbal Behav (2018) 42:81–99
+</td></tr><tr><td>d5e1173dcb2a51b483f86694889b015d55094634</td><td></td></tr><tr><td>d2eb1079552fb736e3ba5e494543e67620832c52</td><td>ANNUNZIATA, SAGONAS, CALÌ: DENSELY FUSED SPATIAL TRANSFORMER NETWORKS
+<br/>DeSTNet: Densely Fused Spatial
+<br/>Transformer Networks
+<br/>Onfido Research
+<br/>3 Finsbury Avenue
+<br/>London, UK
+</td></tr><tr><td>d24dafe10ec43ac8fb98715b0e0bd8e479985260</td><td>J Nonverbal Behav (2018) 42:81–99
<br/>https://doi.org/10.1007/s10919-017-0266-z
<br/>O R I G I N A L P A P E R
<br/>Effects of Social Anxiety on Emotional Mimicry
@@ -6166,6 +7895,27 @@
<br/>• Agneta H. Fischer2
<br/>Published online: 25 September 2017
<br/>© The Author(s) 2017. This article is an open access publication
+</td></tr><tr><td>d278e020be85a1ccd90aa366b70c43884dd3f798</td><td>Learning From Less Data: Diversified Subset Selection and
+<br/>Active Learning in Image Classification Tasks
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>AITOE Labs
+<br/>Mumbai, Maharashtra, India
+<br/>AITOE Labs
+<br/>Mumbai, Maharashtra, India
+<br/>Rishabh Iyer
+<br/>AITOE Labs
+<br/>Seattle, Washington, USA
+<br/>AITOE Labs
+<br/>Seattle, Washington, USA
+<br/>Narsimha Raju
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>May 30, 2018
</td></tr><tr><td>aafb271684a52a0b23debb3a5793eb618940c5dd</td><td></td></tr><tr><td>aa52910c8f95e91e9fc96a1aefd406ffa66d797d</td><td>FACE RECOGNITION SYSTEM BASED
<br/>ON 2DFLD AND PCA
<br/>E&TC Department
@@ -6175,6 +7925,8 @@
<br/>ME E&TC [Digital System]
<br/>Sinhgad Academy of Engineering
<br/>Pune, India
+</td></tr><tr><td>aadfcaf601630bdc2af11c00eb34220da59b7559</td><td>Multi-view Hybrid Embedding:
+<br/>A Divide-and-Conquer Approach
</td></tr><tr><td>aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5</td><td>Biometrics in Ambient Intelligence
</td></tr><tr><td>aa331fe378056b6d6031bb8fe6676e035ed60d6d</td><td></td></tr><tr><td>aae0e417bbfba701a1183d3d92cc7ad550ee59c3</td><td>844
<br/>A Statistical Method for 2-D Facial Landmarking
@@ -6262,7 +8014,7 @@
<br/>Okhla Phase 3
<br/>Delhi, 110020, India
<br/>Delhi, 110020, India
-</td></tr><tr><td>af54dd5da722e104740f9b6f261df9d4688a9712</td><td></td></tr><tr><td>afc7092987f0d05f5685e9332d83c4b27612f964</td><td>Person-Independent Facial Expression Detection using Constrained
+</td></tr><tr><td>af6cae71f24ea8f457e581bfe1240d5fa63faaf7</td><td></td></tr><tr><td>af54dd5da722e104740f9b6f261df9d4688a9712</td><td></td></tr><tr><td>afc7092987f0d05f5685e9332d83c4b27612f964</td><td>Person-Independent Facial Expression Detection using Constrained
<br/>Local Models
</td></tr><tr><td>b730908bc1f80b711c031f3ea459e4de09a3d324</td><td>2024
<br/>Active Orientation Models for Face
@@ -6289,7 +8041,8 @@
<br/>NFRAD: Near-Infrared Face Recognition at a Distance
<br/>aDept. of Brain and Cognitive Eng. Korea Univ., Seoul, Korea
<br/>bDept. of Comp. Sci. & Eng. Michigan State Univ., E. Lansing, MI, USA 48824
-</td></tr><tr><td>b73fdae232270404f96754329a1a18768974d3f6</td><td></td></tr><tr><td>b747fcad32484dfbe29530a15776d0df5688a7db</td><td></td></tr><tr><td>b7f7a4df251ff26aca83d66d6b479f1dc6cd1085</td><td>Bouges et al. EURASIP Journal on Image and Video Processing 2013, 2013:55
+</td></tr><tr><td>b73fdae232270404f96754329a1a18768974d3f6</td><td></td></tr><tr><td>b76af8fcf9a3ebc421b075b689defb6dc4282670</td><td>Face Mask Extraction in Video Sequence
+</td></tr><tr><td>b747fcad32484dfbe29530a15776d0df5688a7db</td><td></td></tr><tr><td>b7f7a4df251ff26aca83d66d6b479f1dc6cd1085</td><td>Bouges et al. EURASIP Journal on Image and Video Processing 2013, 2013:55
<br/>http://jivp.eurasipjournals.com/content/2013/1/55
<br/>RESEARCH
<br/>Open Access
@@ -6304,7 +8057,10 @@
<br/><b></b></td></tr><tr><td>dbaf89ca98dda2c99157c46abd136ace5bdc33b3</td><td>Nonlinear Cross-View Sample Enrichment for
<br/>Action Recognition
<br/>Institut Mines-Télécom; Télécom ParisTech; CNRS LTCI
-</td></tr><tr><td>dbab6ac1a9516c360cdbfd5f3239a351a64adde7</td><td></td></tr><tr><td>dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8</td><td>Chapter 7
+</td></tr><tr><td>dbab6ac1a9516c360cdbfd5f3239a351a64adde7</td><td></td></tr><tr><td>dbe255d3d2a5d960daaaba71cb0da292e0af36a7</td><td>Evolutionary Cost-sensitive Extreme Learning
+<br/>Machine
+<br/>1
+</td></tr><tr><td>dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8</td><td>Chapter 7
<br/>Machine Learning Techniques
<br/>for Face Analysis
</td></tr><tr><td>dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57</td><td>Face Image Analysis With
@@ -6317,6 +8073,8 @@
<br/>Stefan Duffner
<br/>2007
</td></tr><tr><td>a83fc450c124b7e640adc762e95e3bb6b423b310</td><td>Deep Face Feature for Face Alignment
+</td></tr><tr><td>a85e9e11db5665c89b057a124547377d3e1c27ef</td><td>Dynamics of Driver’s Gaze: Explorations in
+<br/>Behavior Modeling & Maneuver Prediction
</td></tr><tr><td>a8117a4733cce9148c35fb6888962f665ae65b1e</td><td>IEEE TRANSACTIONS ON XXXX, VOL. XX, NO. XX, XX 201X
<br/>A Good Practice Towards Top Performance of Face
<br/>Recognition: Transferred Deep Feature Fusion
@@ -6373,8 +8131,35 @@
<br/>Simultaneously Learning Neighborship and
<br/>Projection Matrix for Supervised
<br/>Dimensionality Reduction
+</td></tr><tr><td>a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8</td><td>This is a repository copy of Modelling of Orthogonal Craniofacial Profiles.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/131767/
+<br/>Version: Published Version
+<br/>Article:
+<br/>Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634 and Duncan, Christian
+<br/>(2017) Modelling of Orthogonal Craniofacial Profiles. Journal of Imaging. ISSN 2313-433X
+<br/>https://doi.org/10.3390/jimaging3040055
+<br/>Reuse
+<br/>This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+<br/>allows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+<br/>authors for the original work. More information and the full terms of the licence here:
+<br/>https://creativecommons.org/licenses/
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
</td></tr><tr><td>a8e75978a5335fd3deb04572bb6ca43dbfad4738</td><td>Sparse Graphical Representation based Discriminant
<br/>Analysis for Heterogeneous Face Recognition
+</td></tr><tr><td>ded968b97bd59465d5ccda4f1e441f24bac7ede5</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large scale 3D Morphable Models
+<br/>Zafeiriou
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>de0eb358b890d92e8f67592c6e23f0e3b2ba3f66</td><td>ACCEPTED BY IEEE TRANS. PATTERN ANAL. AND MACH. INTELL.
+<br/>Inference-Based Similarity Search in
+<br/>Randomized Montgomery Domains for
+<br/>Privacy-Preserving Biometric Identification
+</td></tr><tr><td>dee406a7aaa0f4c9d64b7550e633d81bc66ff451</td><td>Content-Adaptive Sketch Portrait Generation by
+<br/>Decompositional Representation Learning
</td></tr><tr><td>dedabf9afe2ae4a1ace1279150e5f1d495e565da</td><td>3294
<br/>Robust Face Recognition With Structurally
<br/>Incoherent Low-Rank Matrix Decomposition
@@ -6383,23 +8168,51 @@
</td></tr><tr><td>ded41c9b027c8a7f4800e61b7cfb793edaeb2817</td><td></td></tr><tr><td>defa8774d3c6ad46d4db4959d8510b44751361d8</td><td>FEBEI - Face Expression Based Emoticon Identification
<br/>CS - B657 Computer Vision
<br/>Robert J Henderson - rojahend
+</td></tr><tr><td>b0c512fcfb7bd6c500429cbda963e28850f2e948</td><td></td></tr><tr><td>b09b693708f412823053508578df289b8403100a</td><td>WANG et al.: TWO-STREAM SR-CNNS FOR ACTION RECOGNITION IN VIDEOS
+<br/>Two-Stream SR-CNNs for Action
+<br/>Recognition in Videos
+<br/>1 Advanced Interactive Technologies Lab
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
+<br/>2 Computer Vision Lab
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
</td></tr><tr><td>b07582d1a59a9c6f029d0d8328414c7bef64dca0</td><td>Employing Fusion of Learned and Handcrafted
<br/>Features for Unconstrained Ear Recognition
<br/>Maurício Pamplona Segundo∗†
<br/>October 24, 2017
-</td></tr><tr><td>b03d6e268cde7380e090ddaea889c75f64560891</td><td></td></tr><tr><td>b0de0892d2092c8c70aa22500fed31aa7eb4dd3f</td><td>(will be inserted by the editor)
+</td></tr><tr><td>b03d6e268cde7380e090ddaea889c75f64560891</td><td></td></tr><tr><td>b0c1615ebcad516b5a26d45be58068673e2ff217</td><td>How Image Degradations Affect Deep CNN-based Face
+<br/>Recognition?
+<br/>Şamil Karahan1 Merve Kılınç Yıldırım1 Kadir Kırtaç1 Ferhat Şükrü Rende1
+<br/>Gültekin Bütün1 Hazım Kemal Ekenel2
+</td></tr><tr><td>b0de0892d2092c8c70aa22500fed31aa7eb4dd3f</td><td>(will be inserted by the editor)
<br/>A robust and efficient video representation for action recognition
<br/>Received: date / Accepted: date
</td></tr><tr><td>a66d89357ada66d98d242c124e1e8d96ac9b37a0</td><td>Failure Detection for Facial Landmark Detectors
<br/>Computer Vision Lab, D-ITET, ETH Zurich, Switzerland
</td></tr><tr><td>a608c5f8fd42af6e9bd332ab516c8c2af7063c61</td><td>2408
<br/>Age Estimation via Grouping and Decision Fusion
-</td></tr><tr><td>a6583c8daa7927eedb3e892a60fc88bdfe89a486</td><td></td></tr><tr><td>a694180a683f7f4361042c61648aa97d222602db</td><td>Face Recognition using Scattering Wavelet under Illicit Drug Abuse Variations
+</td></tr><tr><td>a6eb6ad9142130406fb4ffd4d60e8348c2442c29</td><td>Video Description: A Survey of Methods,
+<br/>Datasets and Evaluation Metrics
+</td></tr><tr><td>a6583c8daa7927eedb3e892a60fc88bdfe89a486</td><td></td></tr><tr><td>a6590c49e44aa4975b2b0152ee21ac8af3097d80</td><td>https://doi.org/10.1007/s11263-018-1074-6
+<br/>3D Interpreter Networks for Viewer-Centered Wireframe Modeling
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>a694180a683f7f4361042c61648aa97d222602db</td><td>Face Recognition using Scattering Wavelet under Illicit Drug Abuse Variations
<br/>IIIT-Delhi India
-</td></tr><tr><td>a6db73f10084ce6a4186363ea9d7475a9a658a11</td><td></td></tr><tr><td>a6634ff2f9c480e94ed8c01d64c9eb70e0d98487</td><td></td></tr><tr><td>b9f2a755940353549e55690437eb7e13ea226bbf</td><td>Unsupervised Feature Learning from Videos for Discovering and Recognizing Actions
+</td></tr><tr><td>a6db73f10084ce6a4186363ea9d7475a9a658a11</td><td></td></tr><tr><td>a6634ff2f9c480e94ed8c01d64c9eb70e0d98487</td><td></td></tr><tr><td>b9d0774b0321a5cfc75471b62c8c5ef6c15527f5</td><td>Fishy Faces: Crafting Adversarial Images to Poison Face Authentication
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+</td></tr><tr><td>b908edadad58c604a1e4b431f69ac8ded350589a</td><td>Deep Face Feature for Face Alignment
+</td></tr><tr><td>b9f2a755940353549e55690437eb7e13ea226bbf</td><td>Unsupervised Feature Learning from Videos for Discovering and Recognizing Actions
</td></tr><tr><td>b9cedd1960d5c025be55ade0a0aa81b75a6efa61</td><td>INEXACT KRYLOV SUBSPACE ALGORITHMS FOR LARGE
<br/>MATRIX EXPONENTIAL EIGENPROBLEM FROM
<br/>DIMENSIONALITY REDUCTION
+</td></tr><tr><td>b971266b29fcecf1d5efe1c4dcdc2355cb188ab0</td><td>MAI et al.: ON THE RECONSTRUCTION OF FACE IMAGES FROM DEEP FACE TEMPLATES
+<br/>On the Reconstruction of Face Images from
+<br/>Deep Face Templates
</td></tr><tr><td>a158c1e2993ac90a90326881dd5cb0996c20d4f3</td><td>OPEN ACCESS
<br/>ISSN 2073-8994
<br/>Article
@@ -6454,6 +8267,42 @@
<br/>duplicate detection, data deduplication, con-
<br/>densation, consolidation
<br/>image clustering,
+</td></tr><tr><td>a1132e2638a8abd08bdf7fc4884804dd6654fa63</td><td>6
+<br/>Real-Time Video Face Recognition
+<br/>for Embedded Devices
+<br/>Tessera, Galway,
+<br/>Ireland
+<br/>1. Introduction
+<br/>This chapter will address the challenges of real-time video face recognition systems
+<br/>implemented in embedded devices. Topics to be covered include: the importance and
+<br/>challenges of video face recognition in real life scenarios, describing a general architecture of
+<br/>a generic video face recognition system and a working solution suitable for recognizing
+<br/>faces in real-time using low complexity devices. Each component of the system will be
+<br/>described together with the system’s performance on a database of video samples that
+<br/>resembles real life conditions.
+<br/>2. Video face recognition
+<br/>Face recognition remains a very active topic in computer vision and receives attention from
+<br/>a large community of researchers in that discipline. Many reasons feed this interest; the
+<br/>main being the wide range of commercial, law enforcement and security applications that
+<br/>require authentication. The progress made in recent years on the methods and algorithms
+<br/>for data processing as well as the availability of new technologies makes it easier to study
+<br/>these algorithms and turn them into commercially viable products. Biometric-based security
+<br/>systems are becoming more popular due to their non-invasive nature and their increasing
+<br/>reliability. Surveillance applications based on face recognition are gaining increasing
+<br/>attention after the United States’ 9/11 events and with the ongoing security threats. The
+<br/>Face Recognition Vendor Test (FRVT) (Phillips et al., 2003) includes video face recognition
+<br/>testing starting with the 2002 series of tests.
+<br/>Recently, face recognition technology was deployed in consumer applications such as
+<br/>organizing a collection of images using the faces present in the images (Picasa; Corcoran &
+<br/>Costache, 2005), prioritizing family members for best capturing conditions when taking
+<br/>pictures, or directly annotating the images as they are captured (Costache et al., 2006).
+<br/>Video face recognition, compared with more traditional still face recognition, has the main
+<br/>advantage of using multiple instances of the same individual in sequential frames for
+<br/>recognition to occur. In the still recognition case, the system has only one input image to make
+<br/>the decision if the person is or is not in the database. If the image is not suitable for
+<br/>recognition (due to face orientation, expression, quality or facial occlusions) the recognition
+<br/>result will most likely be incorrect. In the video image there are multiple frames which can
+</td></tr><tr><td>a14ae81609d09fed217aa12a4df9466553db4859</td><td>REVISED VERSION, JUNE 2011
</td></tr><tr><td>a14ae81609d09fed217aa12a4df9466553db4859</td><td>REVISED VERSION, JUNE 2011
<br/>Face Identification Using Large Feature Sets
</td></tr><tr><td>a1e97c4043d5cc9896dc60ae7ca135782d89e5fc</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
@@ -6461,18 +8310,58 @@
<br/>Personal, Social and Environmental Constraints
</td></tr><tr><td>efd308393b573e5410455960fe551160e1525f49</td><td>Tracking Persons-of-Interest via
<br/>Unsupervised Representation Adaptation
+</td></tr><tr><td>ef4ecb76413a05c96eac4c743d2c2a3886f2ae07</td><td>Modeling the Importance of Faces in Natural Images
+<br/>Jin B.a, Yildirim G.a, Lau C.a, Shaji A.a, Ortiz Segovia M.b and Süsstrunk S.a
+<br/>aEPFL, Lausanne, Switzerland;
+<br/>bOcé, Paris, France
+</td></tr><tr><td>ef032afa4bdb18b328ffcc60e2dc5229cc1939bc</td><td>Fang and Yuan EURASIP Journal on Image and Video
+<br/>Processing (2018) 2018:44
+<br/>https://doi.org/10.1186/s13640-018-0282-x
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Attribute-enhanced metric learning for
+<br/>face retrieval
+</td></tr><tr><td>ef5531711a69ed687637c48930261769465457f0</td><td>Studio2Shop: from studio photo shoots to fashion articles
+<br/>Zalando Research, Muehlenstr. 25, 10243 Berlin, Germany
+<br/>Keywords:
+<br/>computer vision, deep learning, fashion, item recognition, street-to-shop
+</td></tr><tr><td>efa08283656714911acff2d5022f26904e451113</td><td>Active Object Localization in Visual Situations
+</td></tr><tr><td>ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d</td><td>Calhoun: The NPS Institutional Archive
+<br/>DSpace Repository
+<br/>Theses and Dissertations
+<br/>1. Thesis and Dissertation Collection, all items
+<br/>2017-12
+<br/>Improving face verification in photo albums by
+<br/>combining facial recognition and metadata
+<br/>with cross-matching
+<br/>Monterey, California: Naval Postgraduate School
+<br/>http://hdl.handle.net/10945/56868
+<br/>Downloaded from NPS Archive: Calhoun
+</td></tr><tr><td>c3beae515f38daf4bd8053a7d72f6d2ed3b05d88</td><td></td></tr><tr><td>c3dc4f414f5233df96a9661609557e341b71670d</td><td>Tao et al. EURASIP Journal on Advances in Signal Processing 2011, 2011:4
+<br/>http://asp.eurasipjournals.com/content/2011/1/4
+<br/>RESEARCH
+<br/>Utterance independent bimodal emotion
+<br/>recognition in spontaneous communication
+<br/>Open Access
</td></tr><tr><td>c398684270543e97e3194674d9cce20acaef3db3</td><td>Chapter 2
<br/>Comparative Face Soft Biometrics for
<br/>Human Identification
-</td></tr><tr><td>c3418f866a86dfd947c2b548cbdeac8ca5783c15</td><td></td></tr><tr><td>c32383330df27625592134edd72d69bb6b5cff5c</td><td>422
+</td></tr><tr><td>c3285a1d6ec6972156fea9e6dc9a8d88cd001617</td><td></td></tr><tr><td>c3418f866a86dfd947c2b548cbdeac8ca5783c15</td><td></td></tr><tr><td>c32383330df27625592134edd72d69bb6b5cff5c</td><td>422
<br/>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 2, APRIL 2012
<br/>Intrinsic Illumination Subspace for Lighting
<br/>Insensitive Face Recognition
-</td></tr><tr><td>c3a3f7758bccbead7c9713cb8517889ea6d04687</td><td></td></tr><tr><td>c37a971f7a57f7345fdc479fa329d9b425ee02be</td><td>A Novice Guide towards Human Motion Analysis and Understanding
+</td></tr><tr><td>c3a3f7758bccbead7c9713cb8517889ea6d04687</td><td></td></tr><tr><td>c30e4e4994b76605dcb2071954eaaea471307d80</td><td></td></tr><tr><td>c37a971f7a57f7345fdc479fa329d9b425ee02be</td><td>A Novice Guide towards Human Motion Analysis and Understanding
</td></tr><tr><td>c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af</td><td></td></tr><tr><td>c3fb2399eb4bcec22723715556e31c44d086e054</td><td>499
<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
<br/>1. INTRODUCTION
+</td></tr><tr><td>c37de914c6e9b743d90e2566723d0062bedc9e6a</td><td>©2016 Society for Imaging Science and Technology
+<br/>DOI: 10.2352/ISSN.2470-1173.2016.11.IMAWM-455
+<br/>Joint and Discriminative Dictionary Learning
+<br/>for Facial Expression Recognition
</td></tr><tr><td>c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4</td><td>Int J Comput Vis (2014) 108:3–29
<br/>DOI 10.1007/s11263-014-0698-4
<br/>The Ignorant Led by the Blind: A Hybrid Human–Machine Vision
@@ -6519,9 +8408,73 @@
<br/>approach, where understanding activity is centered on
</td></tr><tr><td>c49aed65fcf9ded15c44f9cbb4b161f851c6fa88</td><td>Multiscale Facial Expression Recognition using Convolutional Neural Networks
<br/>IDIAP, Martigny, Switzerland
+</td></tr><tr><td>eac6aee477446a67d491ef7c95abb21867cf71fc</td><td>JOURNAL
+<br/>A survey of sparse representation: algorithms and
+<br/>applications
</td></tr><tr><td>ea482bf1e2b5b44c520fc77eab288caf8b3f367a</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
<br/>2592
-</td></tr><tr><td>ea85378a6549bb9eb9bcc13e31aa6a61b655a9af</td><td>Diplomarbeit
+</td></tr><tr><td>eafda8a94e410f1ad53b3e193ec124e80d57d095</td><td>Jeffrey F. Cohn
+<br/>13
+<br/>Observer-Based Measurement of Facial Expression With the Facial Action Coding System
+<br/>Facial expression has been a focus of emotion research for over a hundred years (Darwin, 1872/1998). It is central to several leading theories of emotion (Ekman, 1992; Izard, 1977; Tomkins, 1962) and has been the focus of at times heated debate about issues in emotion science (Ekman, 1973, 1993; Fridlund, 1992; Russell, 1994). Facial expression figures prominently in research on almost every aspect of emotion, including psychophysiology (Levenson, Ekman, & Friesen, 1990), neural bases (Calder et al., 1996; Davidson, Ekman, Saron, Senulis, & Friesen, 1990), development (Malatesta, Culver, Tesman, & Shephard, 1989; Matias & Cohn, 1993), perception (Ambadar, Schooler, & Cohn, 2005), social processes (Hatfield, Cacioppo, & Rapson, 1992; Hess & Kirouac, 2000), and emotion disorder (Kaiser, 2002; Sloan, Straussa, Quirka, & Sajatovic, 1997), to name a few.
+<br/>Because of its importance to the study of emotion, a number of observer-based systems of facial expression measurement have been developed (Ekman & Friesen, 1978, 1982; Ekman, Friesen, & Tomkins, 1971; Izard, 1979, 1983; Izard & Dougherty, 1981; Kring & Sloan, 1991; Tronick, Als, & Brazelton, 1980). Of these various systems for describing facial expression, the Facial Action Coding System (FACS; Ekman & Friesen, 1978; Ekman, Friesen, & Hager, 2002) is the most comprehensive, psychometrically rigorous, and widely used (Cohn & Ekman, 2005; Ekman & Rosenberg, 2005). Using FACS and viewing video-recorded facial behavior at frame rate and slow motion, coders can manually code nearly all possible facial expressions, which are decomposed into action units (AUs). Action units, with some qualifications, are the smallest visually discriminable facial movements. By comparison, other systems are less thorough (Malatesta et al., 1989), fail to differentiate between some anatomically distinct movements (Oster, Hegley, & Nagel, 1992), consider movements that are not anatomically distinct as separable (Oster et al., 1992), and often assume a one-to-one mapping between facial expression and emotion (for a review of these systems, see Cohn & Ekman, in press).
+<br/>Unlike systems that use emotion labels to describe expression, FACS explicitly distinguishes between facial actions and inferences about what they mean. FACS itself is descriptive and includes no emotion-specified descriptors. Hypotheses and inferences about the emotional meaning of facial actions are extrinsic to FACS. If one wishes to make emotion-based inferences from FACS codes, a variety of related resources exist. These include the FACS Investigators’ Guide (Ekman et al., 2002), the FACS interpretive database (Ekman, Rosenberg, & Hager, 1998), and a large body of empirical research (Ekman & Rosenberg, 2005). These resources suggest combination rules for defining emotion-specified expressions from FACS action units, but this inferential step remains extrinsic to FACS. Because of its descriptive power, FACS is regarded by many as the standard measure for facial behavior and is used widely in diverse fields. Beyond emotion science, these include facial neuromuscular disorders (Van Swearingen & Cohn, 2005), neuroscience (Bruce & Young, 1998; Rinn, 1984, 1991), computer vision (Bartlett, </td></tr><tr><td>ea85378a6549bb9eb9bcc13e31aa6a61b655a9af</td><td>Diplomarbeit
<br/>Template Protection for PCA-LDA-based 3D
<br/>Face Recognition System
<br/>von
@@ -6543,7 +8496,16 @@
<br/>Sabbir Ahmmed
<br/>TU Berlin
<br/>TU Berlin
-</td></tr><tr><td>e16efd2ae73a325b7571a456618bfa682b51aef8</td><td></td></tr><tr><td>e13360cda1ebd6fa5c3f3386c0862f292e4dbee4</td><td></td></tr><tr><td>e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2</td><td>TSINGHUA SCIENCE AND TECHNOLOGY
+</td></tr><tr><td>e16efd2ae73a325b7571a456618bfa682b51aef8</td><td></td></tr><tr><td>e19ebad4739d59f999d192bac7d596b20b887f78</td><td>Learning Gating ConvNet for Two-Stream based Methods in Action
+<br/>Recognition
+</td></tr><tr><td>e13360cda1ebd6fa5c3f3386c0862f292e4dbee4</td><td></td></tr><tr><td>e1d726d812554f2b2b92cac3a4d2bec678969368</td><td>J Electr Eng Technol. 2015; 10(?): 30-40
+<br/>http://dx.doi.org/10.5370/JEET.2015.10.2.030
+<br/>ISSN (Print) 1975-0102, ISSN (Online) 2093-7423
+<br/>Human Action Recognition Based on Local Action Attributes
+<br/>and Mohan S Kankanhalli**
+</td></tr><tr><td>e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2</td><td>TSINGHUA SCIENCE AND TECHNOLOGY
<br/>ISSN 1007-0214
<br/>0?/?? pp???–???
<br/>DOI: 10.26599/TST.2018.9010000
@@ -6585,6 +8547,8 @@
</td></tr><tr><td>cda4fb9df653b5721ad4fe8b4a88468a410e55ec</td><td>Gabor wavelet transform and its application
</td></tr><tr><td>cd3005753012409361aba17f3f766e33e3a7320d</td><td>Multilinear Biased Discriminant Analysis: A Novel Method for Facial
<br/>Action Unit Representation
+</td></tr><tr><td>cd7a7be3804fd217e9f10682e0c0bfd9583a08db</td><td>Women also Snowboard:
+<br/>Overcoming Bias in Captioning Models
</td></tr><tr><td>ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JUNE 2011
<br/>A Dynamic Appearance Descriptor Approach to
<br/>Facial Actions Temporal Modelling
@@ -6604,9 +8568,28 @@
<br/>Cinema and other aspects of film and video creation.
<br/>PROJECT DATE 2014
<br/>URL http://misharabinovich.com/soyummy.html
+</td></tr><tr><td>cc8e378fd05152a81c2810f682a78c5057c8a735</td><td>International Journal of Computer Sciences and Engineering Open Access
+<br/> Research Paper Volume-5, Issue-12 E-ISSN: 2347-2693
+<br/>Expression Invariant Face Recognition System based on Topographic
+<br/>Independent Component Analysis and Inner Product Classifier
+<br/>
+<br/>Department of Electrical Engineering, IIT Delhi, New Delhi, India
+<br/>Available online at: www.ijcseonline.org
+<br/>Received: 07/Nov/2017, Revised: 22/Nov/2017, Accepted: 14/Dec/2017, Published: 31/Dec/2017
+</td></tr><tr><td>cc31db984282bb70946f6881bab741aa841d3a7c</td><td>ALBANIE, VEDALDI: LEARNING GRIMACES BY WATCHING TV
+<br/>Learning Grimaces by Watching TV
+<br/>http://www.robots.ox.ac.uk/~albanie
+<br/>http://www.robots.ox.ac.uk/~vedaldi
+<br/>Engineering Science Department
+<br/>University of Oxford
+<br/>Oxford, UK
</td></tr><tr><td>cc8bf03b3f5800ac23e1a833447c421440d92197</td><td></td></tr><tr><td>cc96eab1e55e771e417b758119ce5d7ef1722b43</td><td>An Empirical Study of Recent
<br/>Face Alignment Methods
</td></tr><tr><td>e64b683e32525643a9ddb6b6af8b0472ef5b6a37</td><td>Face Recognition and Retrieval in Video
+</td></tr><tr><td>e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227</td><td>Pairwise Relational Networks for Face
+<br/>Recognition
+<br/>1 Department of Creative IT Engineering, POSTECH, Korea
+<br/>2 Department of Computer Science and Engineering, POSTECH, Korea
</td></tr><tr><td>e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec</td><td>Chapter 15. The critical role of the
<br/>cold-start problem and incentive systems
<br/>in emotional Web 2.0 services
@@ -6616,10 +8599,55 @@
<br/>Dimension Reduction
-</td></tr><tr><td>e6540d70e5ffeed9f447602ea3455c7f0b38113e</td><td></td></tr><tr><td>e6ee36444038de5885473693fb206f49c1369138</td><td></td></tr><tr><td>f913bb65b62b0a6391ffa8f59b1d5527b7eba948</td><td></td></tr><tr><td>f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1</td><td></td></tr><tr><td>f06b015bb19bd3c39ac5b1e4320566f8d83a0c84</td><td></td></tr><tr><td>f0a3f12469fa55ad0d40c21212d18c02be0d1264</td><td>Sparsity Sharing Embedding for Face
+</td></tr><tr><td>e6e5a6090016810fb902b51d5baa2469ae28b8a1</td><td>Title: Energy-Efficient Deep In-memory Architecture for NAND Flash Memories
+<br/>Archived version: Accepted manuscript (the content is the same as the published paper but without the final typesetting by the publisher)
+<br/>DOI: 10.1109/ISCAS.2018.8351458
+</td></tr><tr><td>e6540d70e5ffeed9f447602ea3455c7f0b38113e</td><td></td></tr><tr><td>e6ee36444038de5885473693fb206f49c1369138</td><td></td></tr><tr><td>f913bb65b62b0a6391ffa8f59b1d5527b7eba948</td><td></td></tr><tr><td>f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1</td><td></td></tr><tr><td>f0cee87e9ecedeb927664b8da44b8649050e1c86</td><td></td></tr><tr><td>f0f4f16d5b5f9efe304369120651fa688a03d495</td><td>Temporal Generative Adversarial Nets
+<br/>Preferred Networks inc., Japan
+</td></tr><tr><td>f06b015bb19bd3c39ac5b1e4320566f8d83a0c84</td><td></td></tr><tr><td>f0a3f12469fa55ad0d40c21212d18c02be0d1264</td><td>Sparsity Sharing Embedding for Face
<br/>Verification
<br/>Department of Electrical Engineering, KAIST, Daejeon, Korea
-</td></tr><tr><td>f7452a12f9bd927398e036ea6ede02da79097e6e</td><td></td></tr><tr><td>f7de943aa75406fe5568fdbb08133ce0f9a765d4</td><td>Project 1.5: Human Identification at a Distance - Hornak, Adjeroh, Cukic, Gautum, & Ross
+</td></tr><tr><td>f7dea4454c2de0b96ab5cf95008ce7144292e52a</td><td></td></tr><tr><td>f7b422df567ce9813926461251517761e3e6cda0</td><td>FACE AGING WITH CONDITIONAL GENERATIVE ADVERSARIAL NETWORKS
+<br/>* Orange Labs, 4 rue Clos Courtel, 35512 Cesson-Sévigné, France
+<br/>† Eurecom, 450 route des Chappes, 06410 Biot, France
+</td></tr><tr><td>f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f</td><td>Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published version when available.
+<br/>Title: On color texture normalization for active appearance models
+<br/>Author(s): Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+<br/>Publication Date: 2009-05-12
+<br/>Publication Information: Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color Texture Normalization for Active Appearance Models. Image Processing, IEEE Transactions on, 18(6), 1372-1378.
+<br/>Publisher: IEEE
+<br/>Link to publisher's version: http://dx.doi.org/10.1109/TIP.2009.2017163
+<br/>Item record: http://hdl.handle.net/10379/1350
+<br/>Some rights reserved. For more information, please see the item record link above.
+<br/>Downloaded 2017-06-17T22:38:27Z
+</td></tr><tr><td>f7452a12f9bd927398e036ea6ede02da79097e6e</td><td></td></tr><tr><td>f7dcadc5288653ec6764600c7c1e2b49c305dfaa</td><td>Copyright
+<br/>by
+<br/>Adriana Ivanova Kovashka
+<br/>2014
+</td></tr><tr><td>f7de943aa75406fe5568fdbb08133ce0f9a765d4</td><td>Project 1.5: Human Identification at a Distance - Hornak, Adjeroh, Cukic, Gautum, & Ross
<br/>Project 1.5
<br/>Biometric Identification and Surveillance
<br/>Year 5 Deliverable
@@ -6677,9 +8705,63 @@
</td></tr><tr><td>f78863f4e7c4c57744715abe524ae4256be884a9</td><td></td></tr><tr><td>f77c9bf5beec7c975584e8087aae8d679664a1eb</td><td>Local Deep Neural Networks for Age and Gender Classification
<br/>March 27, 2017
-</td></tr><tr><td>e8410c4cd1689829c15bd1f34995eb3bd4321069</td><td></td></tr><tr><td>e8b2a98f87b7b2593b4a046464c1ec63bfd13b51</td><td>CMS-RCNN: Contextual Multi-Scale
+</td></tr><tr><td>e8410c4cd1689829c15bd1f34995eb3bd4321069</td><td></td></tr><tr><td>e8fdacbd708feb60fd6e7843b048bf3c4387c6db</td><td>Deep Learning
+<br/>Hinnerup Net A/S
+<br/>www.hinnerup.net
+<br/>July 4, 2014
+<br/>Introduction
+<br/>Deep learning is a topic in the field of artificial intelligence (AI) and is a relatively
+<br/>new research area although based on the popular artificial neural networks (supposedly
+<br/>mirroring brain function). With the development of the perceptron in the 1950s and
+<br/>1960s by Frank Rosenblatt, research began on artificial neural networks. To further
+<br/>mimic the architectural depth of the brain, researchers wanted to train a deep multi-
+<br/>layer neural network – this, however, did not happen until Geoffrey Hinton in 2006
+<br/>introduced Deep Belief Networks [1].
+<br/>Recently, the topic of deep learning has gained public interest. Large web companies such
+<br/>as Google and Facebook have focused research on AI and an ever-increasing amount
+<br/>of compute power, which has led to researchers finally being able to produce results
+<br/>that are of interest to the general public. In July 2012 Google trained a deep learning
+<br/>network on YouTube videos with the remarkable result that the network learned to
+<br/>recognize humans as well as cats [6], and in January this year Google successfully used
+<br/>deep learning on Street View images to automatically recognize house numbers with
+<br/>an accuracy comparable to that of a human operator [5]. In March this year Facebook
+<br/>announced their DeepFace algorithm that is able to match faces in photos with Facebook
+<br/>users almost as accurately as a human can do [9].
+<br/>Deep learning and other AI are here to stay and will become more and more present in
+<br/>our daily lives, so we had better make ourselves acquainted with the technology. Let’s
+<br/>dive into the deep water and try not to drown!
+<br/>Data Representations
+<br/>Before presenting data to an AI algorithm, we would normally prepare the data to make
+<br/>it feasible to work with. For instance, if the data consists of images, we would take each
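The cell breaks off mid-sentence, but a common form of the preparation step it gestures at is scaling raw pixels and flattening them into a vector. A minimal sketch under that assumption follows; the helper name and the [0, 1] scaling convention are illustrative, not taken from the text:

```python
import numpy as np

def prepare_image(img_uint8: np.ndarray) -> np.ndarray:
    """Illustrative helper (assumed, not from the text): scale 8-bit pixel
    values to [0, 1] and flatten the image into a single feature vector."""
    scaled = img_uint8.astype(np.float32) / 255.0  # normalize intensities
    return scaled.reshape(-1)                      # H x W (x C) -> 1-D vector

# A 32x32 RGB image becomes a 3072-dimensional input vector.
vec = prepare_image(np.zeros((32, 32, 3), dtype=np.uint8))
assert vec.shape == (3072,)
```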
+</td></tr><tr><td>e8b2a98f87b7b2593b4a046464c1ec63bfd13b51</td><td>CMS-RCNN: Contextual Multi-Scale
<br/>Region-based CNN for Unconstrained Face
<br/>Detection
+</td></tr><tr><td>e8c6c3fc9b52dffb15fe115702c6f159d955d308</td><td>13
+<br/>Linear Subspace Learning for
+<br/>Facial Expression Analysis
+<br/>Philips Research
+<br/>The Netherlands
+<br/>1. Introduction
+<br/>Facial expression, resulting from movements of the facial muscles, is one of the most
+<br/>powerful, natural, and immediate means for human beings to communicate their emotions
+<br/>and intentions. Some examples of facial expressions are shown in Fig. 1. Darwin (1872) was
+<br/>the first to describe in detail the specific facial expressions associated with emotions in
+<br/>animals and humans; he argued that all mammals show emotions reliably in their faces.
+<br/>Psychological studies (Mehrabian, 1968; Ambady & Rosenthal, 1992) indicate that facial
+<br/>expressions, with other non-verbal cues, play a major and fundamental role in face-to-face
+<br/>communication.
+<br/>Fig. 1. Facial expressions of George W. Bush.
+<br/>Machine analysis of facial expressions, enabling computers to analyze and interpret facial
+<br/>expressions as humans do, has many important applications including intelligent human-computer interaction, computer animation, surveillance and security, medical diagnosis,
+<br/>law enforcement, and awareness system (Shan, 2007). Driven by its potential applications
+<br/>and theoretical interests of cognitive and psychological scientists, automatic facial
+<br/>expression analysis has attracted much attention in last two decades (Pantic & Rothkrantz,
+<br/>2000a; Fasel & Luettin, 2003; Tian et al, 2005; Pantic & Bartlett, 2007). It has been studied in
+<br/>multiple disciplines such as psychology, cognitive science, computer vision, pattern
+<br/>Source: Machine Learning, Book edited by: Abdelhamid Mellouk and Abdennacer Chebira,
+<br/> ISBN 978-3-902613-56-1, pp. 450, February 2009, I-Tech, Vienna, Austria
+<br/>www.intechopen.com
</td></tr><tr><td>fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6</td><td>Draft: Evaluation Guidelines for Gender
<br/>Classification and Age Estimation
<br/>July 1, 2011
@@ -6744,13 +8826,23 @@
<br/>each of spatial size 4×4. All layers except the last one are batch normalized followed by a ReLU activation.
<br/>The last layer is followed by Tanh activation, generating an RGB image with values in range [−1, 1]. All
<br/>the layers use a stride of 2 and padding of 1, excluding the first one which does not use stride or padding.
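The cell above reads as a DCGAN-style transposed-convolution generator specification. A minimal PyTorch sketch consistent with it follows; the latent size (100), channel widths, and five-layer depth are assumptions for illustration, while the 4×4 kernels, the stride-2/padding-1 scheme with a stride-free, padding-free first layer, the BatchNorm+ReLU placement, and the final Tanh come from the text:

```python
import torch
import torch.nn as nn

class Generator(nn.Module):
    """Sketch of the described generator: 4x4 kernels everywhere; stride 2 and
    padding 1 in every layer except the first; BatchNorm + ReLU after every
    layer but the last; Tanh at the end for RGB output in [-1, 1]."""
    def __init__(self, z_dim: int = 100, ch: int = 64):  # sizes are assumed
        super().__init__()
        self.net = nn.Sequential(
            # First layer: no stride, no padding; maps the 1x1 latent to 4x4.
            nn.ConvTranspose2d(z_dim, ch * 8, 4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(ch * 8), nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ch * 8, ch * 4, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ch * 4), nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ch * 4, ch * 2, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ch * 2), nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ch * 2, ch, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ch), nn.ReLU(inplace=True),
            nn.ConvTranspose2d(ch, 3, 4, stride=2, padding=1, bias=False),
            nn.Tanh(),  # RGB image with values in [-1, 1]
        )

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        return self.net(z.view(z.size(0), -1, 1, 1))

# g = Generator(); g(torch.randn(8, 100)).shape -> torch.Size([8, 3, 64, 64])
```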
-</td></tr><tr><td>faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b</td><td></td></tr><tr><td>ff8315c1a0587563510195356c9153729b533c5b</td><td>432
+</td></tr><tr><td>faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b</td><td></td></tr><tr><td>faf5583063682e70dedc4466ac0f74eeb63169e7</td><td></td></tr><tr><td>fad895771260048f58d12158a4d4d6d0623f4158</td><td>Audio-Visual Emotion
+<br/>Recognition For Natural
+<br/>Human-Robot Interaction
+<br/>Dissertation for the academic degree of
+<br/>Doktor der Ingenieurwissenschaften (Dr.-Ing.)
+<br/>submitted by
+<br/>to the Faculty of Technology, Universität Bielefeld
+<br/>15 March 2010
+</td></tr><tr><td>ff8315c1a0587563510195356c9153729b533c5b</td><td>432
<br/>Zapping Index: Using Smile to Measure
<br/>Advertisement Zapping Likelihood
</td></tr><tr><td>ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a</td><td>Towards Video Captioning with Naming: a
<br/>Novel Dataset and a Multi-Modal Approach
<br/>Dipartimento di Ingegneria “Enzo Ferrari”
<br/>Università degli Studi di Modena e Reggio Emilia
+</td></tr><tr><td>fffefc1fb840da63e17428fd5de6e79feb726894</td><td>Fine-Grained Age Estimation in the wild with
+<br/>Attention LSTM Networks
</td></tr><tr><td>ff398e7b6584d9a692e70c2170b4eecaddd78357</td><td></td></tr><tr><td>ffd81d784549ee51a9b0b7b8aaf20d5581031b74</td><td>Performance Analysis of Retina and DoG
<br/>Filtering Applied to Face Images for Training
<br/>Correlation Filters
@@ -6762,7 +8854,9 @@
<br/>2 Facultad de Ingeniería, Arquitectura y Diseño, Universidad Autónoma de Baja
<br/>California, Carretera Transpeninsular Tijuana-Ensenada, Núm. 3917, Colonia
<br/>Playitas, Ensenada, Baja California, C.P. 22860
-</td></tr><tr><td>ff60d4601adabe04214c67e12253ea3359f4e082</td><td></td></tr><tr><td>ffcbedb92e76fbab083bb2c57d846a2a96b5ae30</td><td></td></tr><tr><td>c50d73557be96907f88b59cfbd1ab1b2fd696d41</td><td>JournalofElectronicImaging13(3),474–485(July2004).
+</td></tr><tr><td>ff60d4601adabe04214c67e12253ea3359f4e082</td><td></td></tr><tr><td>ff8ef43168b9c8dd467208a0b1b02e223b731254</td><td>BreakingNews: Article Annotation by
+<br/>Image and Text Processing
+</td></tr><tr><td>ffcbedb92e76fbab083bb2c57d846a2a96b5ae30</td><td></td></tr><tr><td>c50d73557be96907f88b59cfbd1ab1b2fd696d41</td><td>Journal of Electronic Imaging 13(3), 474–485 (July 2004).
<br/>Semiconductor sidewall shape estimation
<br/>Oak Ridge National Laboratory
<br/>Oak Ridge, Tennessee 37831-6010
@@ -6797,18 +8891,23 @@
</td></tr><tr><td>c220f457ad0b28886f8b3ef41f012dd0236cd91a</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
<br/>Crystal Loss and Quality Pooling for
<br/>Unconstrained Face Verification and Recognition
+</td></tr><tr><td>c254b4c0f6d5a5a45680eb3742907ec93c3a222b</td><td>A Fusion-based Gender Recognition Method
+<br/>Using Facial Images
</td></tr><tr><td>c28461e266fe0f03c0f9a9525a266aa3050229f0</td><td>Automatic Detection of Facial Feature Points via
<br/>HOGs and Geometric Prior Models
<br/>1 Computer Vision Center, Universitat Autònoma de Barcelona
<br/>2 Universitat Oberta de Catalunya
<br/>3 Dept. de Matemàtica Aplicada i Anàlisi
<br/>Universitat de Barcelona
-</td></tr><tr><td>c29e33fbd078d9a8ab7adbc74b03d4f830714cd0</td><td></td></tr><tr><td>f6ca29516cce3fa346673a2aec550d8e671929a6</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
+</td></tr><tr><td>c29e33fbd078d9a8ab7adbc74b03d4f830714cd0</td><td></td></tr><tr><td>f68ed499e9d41f9c3d16d843db75dc12833d988d</td><td></td></tr><tr><td>f6ca29516cce3fa346673a2aec550d8e671929a6</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
<br/>ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
<br/>Algorithm for Face Matching Using Normalized
<br/>Cross-Correlation
-</td></tr><tr><td>f6c70635241968a6d5fd5e03cde6907022091d64</td><td></td></tr><tr><td>f6abecc1f48f6ec6eede4143af33cc936f14d0d0</td><td></td></tr><tr><td>f6fa97fbfa07691bc9ff28caf93d0998a767a5c1</td><td>k2-means for fast and accurate large scale clustering
+</td></tr><tr><td>f6c70635241968a6d5fd5e03cde6907022091d64</td><td></td></tr><tr><td>f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca</td><td>Cross-label Suppression: A Discriminative and Fast
+<br/>Dictionary Learning with Group Regularization
+<br/>April 24, 2017
+</td></tr><tr><td>f6abecc1f48f6ec6eede4143af33cc936f14d0d0</td><td></td></tr><tr><td>f6fa97fbfa07691bc9ff28caf93d0998a767a5c1</td><td>k2-means for fast and accurate large scale clustering
<br/>Computer Vision Lab
<br/>D-ITET
<br/>ETH Zurich
@@ -6822,6 +8921,11 @@
<br/>Cognitive Learning for Social Robot through
<br/>Facial Expression from Video Input
<br/>1Department of Automation & Robotics, 2Department of Computer Science & Engg.
+</td></tr><tr><td>e988be047b28ba3b2f1e4cdba3e8c94026139fcf</td><td>Multi-Task Convolutional Neural Network for
+<br/>Pose-Invariant Face Recognition
+</td></tr><tr><td>e9d43231a403b4409633594fa6ccc518f035a135</td><td>Deformable Part Models with CNN Features
+<br/>Kokkinos1,2
+<br/>1 Ecole Centrale Paris, 2 INRIA, 3 TTI-Chicago
</td></tr><tr><td>e9fcd15bcb0f65565138dda292e0c71ef25ea8bb</td><td>Repositorio Institucional de la Universidad Autónoma de Madrid
<br/>https://repositorio.uam.es
<br/>Esta es la versión de autor de la comunicación de congreso publicada en:
@@ -6833,6 +8937,10 @@
<br/>Copyright: © 2013 Springer-Verlag
<br/>El acceso a la versión del editor puede requerir la suscripción del recurso
<br/>Access to the published version may require subscription
+</td></tr><tr><td>e9363f4368b04aeaa6d6617db0a574844fc59338</td><td>BENCHIP: Benchmarking Intelligence
+<br/>Processors
+<br/>1ICT CAS,2Cambricon,3Alibaba Infrastructure Service, Alibaba Group
+<br/>4IFLYTEK,5JD,6RDA Microelectronics,7AMD
</td></tr><tr><td>f16a605abb5857c39a10709bd9f9d14cdaa7918f</td><td>Fast greyscale road sign model matching
<br/>and recognition
<br/>Centre de Visió per Computador
@@ -6856,6 +8964,8 @@
<br/>reicht und durch die Fakultät für Elektrotechnik und Informationstechnik am 30.10.2009
<br/>angenommen.
</td></tr><tr><td>e76798bddd0f12ae03de26b7c7743c008d505215</td><td></td></tr><tr><td>e726acda15d41b992b5a41feabd43617fab6dc23</td><td></td></tr><tr><td>e7b6887cd06d0c1aa4902335f7893d7640aef823</td><td>Modelling of Facial Aging and Kinship: A Survey
+</td></tr><tr><td>cb004e9706f12d1de83b88c209ac948b137caae0</td><td>Face Aging Effect Simulation using Hidden Factor
+<br/>Analysis Joint Sparse Representation
</td></tr><tr><td>cb9092fe74ea6a5b2bb56e9226f1c88f96094388</td><td></td></tr><tr><td>cb08f679f2cb29c7aa972d66fe9e9996c8dfae00</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
<br/>Action Understanding
<br/>with Multiple Classes of Actors
@@ -6999,10 +9109,30 @@
<br/>and convert all the natural language descriptions
<br/>to lower case and tokenize the sentences and
<br/>remove punctuations.
+</td></tr><tr><td>e096b11b3988441c0995c13742ad188a80f2b461</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>DeepProposals: Hunting Objects and Actions by Cascading
+<br/>Deep Convolutional Layers
+<br/>Van Gool
+<br/>Received: date / Accepted: date
</td></tr><tr><td>e0c081a007435e0c64e208e9918ca727e2c1c44e</td><td></td></tr><tr><td>e00d4e4ba25fff3583b180db078ef962bf7d6824</td><td>Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 20 March 2017 doi:10.20944/preprints201703.0152.v1
<br/>Article
<br/>Face Verification with Multi-Task and Multi-Scale
<br/>Features Fusion
+</td></tr><tr><td>e0939b4518a5ad649ba04194f74f3413c793f28e</td><td>Technical Report
+<br/>UCAM-CL-TR-636
+<br/>ISSN 1476-2986
+<br/>Number 636
+<br/>Computer Laboratory
+<br/>Mind-reading machines:
+<br/>automated inference
+<br/>of complex mental states
+<br/>July 2005
+<br/>15 JJ Thomson Avenue
+<br/>Cambridge CB3 0FD
+<br/>United Kingdom
+<br/>phone +44 1223 763500
+<br/>http://www.cl.cam.ac.uk/
</td></tr><tr><td>e0765de5cabe7e287582532456d7f4815acd74c1</td><td></td></tr><tr><td>e013c650c7c6b480a1b692bedb663947cd9d260f</td><td>860
<br/>Robust Image Analysis With Sparse Representation
<br/>on Quantized Visual Features
@@ -7040,7 +9170,12 @@
<br/>Seattle, Washington, May 26-30, 2015
<br/>978-1-4799-6922-7/15/$31.00 ©2015 IEEE
<br/>3039
-</td></tr><tr><td>2c61a9e26557dd0fe824909adeadf22a6a0d86b0</td><td></td></tr><tr><td>2c2786ea6386f2d611fc9dbf209362699b104f83</td><td></td></tr><tr><td>2c848cc514293414d916c0e5931baf1e8583eabc</td><td>An automatic facial expression recognition system
+</td></tr><tr><td>2c61a9e26557dd0fe824909adeadf22a6a0d86b0</td><td></td></tr><tr><td>2c93c8da5dfe5c50119949881f90ac5a0a4f39fe</td><td>Advanced local motion patterns for macro and micro facial
+<br/>expression recognition
+<br/>B. Allaerta,∗, IM. Bilascoa, C. Djerabaa
+<br/>aUniv. Lille, CNRS, Centrale Lille, UMR 9189 - CRIStAL -
+<br/>Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
+</td></tr><tr><td>2c2786ea6386f2d611fc9dbf209362699b104f83</td><td></td></tr><tr><td>2c848cc514293414d916c0e5931baf1e8583eabc</td><td>An automatic facial expression recognition system
<br/>evaluated by different classifiers
<br/>∗Programa de Pós-Graduação em Mecatrônica
<br/>Universidade Federal da Bahia,
@@ -7068,6 +9203,8 @@
<br/>redistribution to servers or lists, or to reuse any copyrighted
<br/>component of this work in other works must be obtained from
<br/>the IEEE.
+</td></tr><tr><td>2c5d1e0719f3ad7f66e1763685ae536806f0c23b</td><td>AENet: Learning Deep Audio Features for Video
+<br/>Analysis
</td></tr><tr><td>2c8f24f859bbbc4193d4d83645ef467bcf25adc2</td><td>845
<br/>Classification in the Presence of
<br/>Label Noise: a Survey
@@ -7081,7 +9218,10 @@
<br/>Unit detection
<br/>1 Sorbonne Universités, UPMC Univ Paris 06, CNRS, ISIR UMR 7222
<br/>4 place Jussieu 75005 Paris
-</td></tr><tr><td>795ea140df2c3d29753f40ccc4952ef24f46576c</td><td></td></tr><tr><td>79b669abf65c2ca323098cf3f19fa7bdd837ff31</td><td> Deakin Research Online
+</td></tr><tr><td>79f6a8f777a11fd626185ab549079236629431ac</td><td>Copyright
+<br/>by
+<br/>2013
+</td></tr><tr><td>795ea140df2c3d29753f40ccc4952ef24f46576c</td><td></td></tr><tr><td>79dc84a3bf76f1cb983902e2591d913cee5bdb0e</td><td></td></tr><tr><td>79b669abf65c2ca323098cf3f19fa7bdd837ff31</td><td> Deakin Research Online
<br/>This is the published version:
<br/>Rana, Santu, Liu, Wanquan, Lazarescu, Mihai and Venkatesh, Svetha 2008, Efficient tensor
<br/>based face recognition, in ICPR 2008 : Proceedings of the 19th International Conference on
@@ -7095,7 +9235,7 @@
<br/>resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
<br/>in other works must be obtained from the IEEE.
<br/>Copyright : 2008, IEEE
-</td></tr><tr><td>79dd787b2877cf9ce08762d702589543bda373be</td><td>Face Detection Using SURF Cascade
+</td></tr><tr><td>79c3a7131c6c176b02b97d368cd0cd0bc713ff7e</td><td></td></tr><tr><td>79dd787b2877cf9ce08762d702589543bda373be</td><td>Face Detection Using SURF Cascade
<br/>Intel Labs China
</td></tr><tr><td>793e7f1ba18848908da30cbad14323b0389fd2a8</td><td></td></tr><tr><td>2dd6c988b279d89ab5fb5155baba65ce4ce53c1e</td><td></td></tr><tr><td>2d294c58b2afb529b26c49d3c92293431f5f98d0</td><td>4413
<br/>Maximum Margin Projection Subspace Learning
@@ -7130,6 +9270,9 @@
<br/>analysis
<br/>for
<br/>information
+</td></tr><tr><td>2d8d089d368f2982748fde93a959cf5944873673</td><td>Proceedings of NAACL-HLT 2018, pages 788–794
+<br/>New Orleans, Louisiana, June 1 - 6, 2018. ©2018 Association for Computational Linguistics
+<br/>788
</td></tr><tr><td>2df4d05119fe3fbf1f8112b3ad901c33728b498a</td><td>Facial landmark detection using structured output deep
<br/>neural networks
<br/>Soufiane Belharbi ∗1, Clément Chatelain∗1, Romain Hérault∗1, and Sébastien
@@ -7145,6 +9288,9 @@
<br/>1P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
<br/>2Associate Professor, Department of Computer Engineering, MCERC, Nashik (M.S.),
<br/>India
+</td></tr><tr><td>414715421e01e8c8b5743c5330e6d2553a08c16d</td><td>PoTion: Pose MoTion Representation for Action Recognition
+<br/>1Inria∗
+<br/>2NAVER LABS Europe
</td></tr><tr><td>41ab4939db641fa4d327071ae9bb0df4a612dc89</td><td>Interpreting Face Images by Fitting a Fast
<br/>Illumination-Based 3D Active Appearance
<br/>Model
@@ -7208,6 +9354,13 @@
<br/>Computer Science Department, Universität Karlsruhe (TH)
<br/>Am Fasanengarten 5, Karlsruhe 76131, Germany
<br/>http://isl.ira.uka.de/cvhci
+</td></tr><tr><td>1b55c4e804d1298cbbb9c507497177014a923d22</td><td>Incremental Class Representation
+<br/>Learning for Face Recognition
+<br/>Degree’s Thesis
+<br/>Audiovisual Systems Engineering
+<br/>Author:
+<br/>Universitat Politècnica de Catalunya (UPC)
+<br/>2016 - 2017
</td></tr><tr><td>1bd50926079e68a6e32dc4412e9d5abe331daefb</td><td></td></tr><tr><td>1b150248d856f95da8316da868532a4286b9d58e</td><td>Analyzing 3D Objects in Cluttered Images
<br/>UC Irvine
<br/>UC Irvine
@@ -7285,7 +9438,9 @@
<br/>DECISION TREES
<br/>Commission II, WG II/5
<br/>KEY WORDS: Face Detection, Cascade Algorithm, Decision Trees.
-</td></tr><tr><td>1b79628af96eb3ad64dbb859dae64f31a09027d5</td><td></td></tr><tr><td>1bc23c771688109bed9fd295ce82d7e702726327</td><td></td></tr><tr><td>1b589016fbabe607a1fb7ce0c265442be9caf3a9</td><td></td></tr><tr><td>1b27ca161d2e1d4dd7d22b1247acee5c53db5104</td><td></td></tr><tr><td>7711a7404f1f1ac3a0107203936e6332f50ac30c</td><td>Action Classification and Highlighting in Videos
+</td></tr><tr><td>1b79628af96eb3ad64dbb859dae64f31a09027d5</td><td></td></tr><tr><td>1b4f6f73c70353869026e5eec1dd903f9e26d43f</td><td>Robust Subjective Visual Property Prediction
+<br/>from Crowdsourced Pairwise Labels
+</td></tr><tr><td>1bc23c771688109bed9fd295ce82d7e702726327</td><td></td></tr><tr><td>1b589016fbabe607a1fb7ce0c265442be9caf3a9</td><td></td></tr><tr><td>1b27ca161d2e1d4dd7d22b1247acee5c53db5104</td><td></td></tr><tr><td>7711a7404f1f1ac3a0107203936e6332f50ac30c</td><td>Action Classification and Highlighting in Videos
<br/>Disney Research Pittsburgh
<br/>Disney Research Pittsburgh
</td></tr><tr><td>778c9f88839eb26129427e1b8633caa4bd4d275e</td><td>Pose Pooling Kernels for Sub-category Recognition
@@ -7293,6 +9448,9 @@
<br/>ICSI & UC Berkeley
<br/>Trevor Darrell
<br/>ICSI & UC Berkeley
+</td></tr><tr><td>7789a5d87884f8bafec8a82085292e87d4e2866f</td><td>A Unified Tensor-based Active Appearance Face
+<br/>Model
+<br/>Member, IEEE
</td></tr><tr><td>776835eb176ed4655d6e6c308ab203126194c41e</td><td></td></tr><tr><td>778bff335ae1b77fd7ec67404f71a1446624331b</td><td>Hough Forest-based Facial Expression Recognition from
<br/>Video Sequences
<br/>BIWI, ETH Zurich http://www.vision.ee.ethz.ch
@@ -7302,6 +9460,13 @@
<br/>†ETH Zurich
</td></tr><tr><td>7754b708d6258fb8279aa5667ce805e9f925dfd0</td><td>Facial Action Unit Recognition by Exploiting
<br/>Their Dynamic and Semantic Relationships
+</td></tr><tr><td>77db171a523fc3d08c91cea94c9562f3edce56e1</td><td>Poursaberi et al. EURASIP Journal on Image and Video Processing 2012, 2012:17
+<br/>http://jivp.eurasipjournals.com/content/2012/1/17
+<br/>RESEARCH
+<br/>Open Access
+<br/>Gauss–Laguerre wavelet textural feature fusion
+<br/>with geometrical information for facial expression
+<br/>identification
</td></tr><tr><td>77037a22c9b8169930d74d2ce6f50f1a999c1221</td><td>Robust Face Recognition With Kernelized
<br/>Locality-Sensitive Group Sparsity Representation
</td></tr><tr><td>77d31d2ec25df44781d999d6ff980183093fb3de</td><td>The Multiverse Loss for Robust Transfer Learning
@@ -7437,6 +9602,17 @@
<br/>skills
<br/>1Laboratoire LIRIS, École centrale de Lyon, 69134 Ecully, France.
<br/>2Safran Identity & Security, 92130 Issy-les-Moulineaux, France.
+</td></tr><tr><td>48186494fc7c0cc664edec16ce582b3fcb5249c0</td><td>P-CNN: Pose-based CNN Features for Action Recognition
+<br/>Guilhem Chéron∗ †
+<br/>INRIA
+</td></tr><tr><td>48499deeaa1e31ac22c901d115b8b9867f89f952</td><td>Interim Report of Final Year Project
+<br/>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>3035140108
+<br/>Haoyu Li
+<br/>3035141841
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
</td></tr><tr><td>486a82f50835ea888fbc5c6babf3cf8e8b9807bc</td><td>MSU TECHNICAL REPORT MSU-CSE-15-11, JULY 24, 2015
<br/>Face Search at Scale: 80 Million Gallery
</td></tr><tr><td>4866a5d6d7a40a26f038fc743e16345c064e9842</td><td></td></tr><tr><td>487df616e981557c8e1201829a1d0ec1ecb7d275</td><td>Acoustic Echo Cancellation Using a Vector-Space-Based
@@ -7450,6 +9626,10 @@
<br/>Feature Extraction and Kernel Fisher Analysis
</td></tr><tr><td>70f189798c8b9f2b31c8b5566a5cf3107050b349</td><td>The Challenge of Face Recognition from Digital Point-and-Shoot Cameras
<br/>David Bolme‡
+</td></tr><tr><td>70109c670471db2e0ede3842cbb58ba6be804561</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Zero-Shot Visual Recognition via Bidirectional Latent Embedding
+<br/>Received: date / Accepted: date
</td></tr><tr><td>703890b7a50d6535900a5883e8d2a6813ead3a03</td><td></td></tr><tr><td>706236308e1c8d8b8ba7749869c6b9c25fa9f957</td><td>Crowdsourced Data Collection of Facial Responses
<br/>MIT Media Lab
<br/>Cambridge
@@ -7614,6 +9794,10 @@
<br/>EXPRESSION RECOGNITION
<br/>USING C-SUPPORT VECTOR
<br/>CLASSIFICATION
+</td></tr><tr><td>1e21b925b65303ef0299af65e018ec1e1b9b8d60</td><td>Under review as a conference paper at ICLR 2017
+<br/>UNSUPERVISED CROSS-DOMAIN IMAGE GENERATION
+<br/>Facebook AI Research
+<br/>Tel-Aviv, Israel
</td></tr><tr><td>1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9</td><td>Entropy Regularization
<br/>The problem of semi-supervised induction consists in learning a decision rule from
<br/>labeled and unlabeled data. This task can be undertaken by discriminative methods,
@@ -7642,6 +9826,26 @@
<br/>examples. The problem differs in the respect that the supervisor’s responses are
<br/>missing for some training examples. This characteristic is shared with transduction,
<br/>which has however a different goal, that is, of predicting labels on a set of predefined
+</td></tr><tr><td>1ee3b4ba04e54bfbacba94d54bf8d05fd202931d</td><td>Indonesian Journal of Electrical Engineering and Computer Science
+<br/>Vol. 12, No. 2, November 2018, pp. 476~481
+<br/>ISSN: 2502-4752, DOI: 10.11591/ijeecs.v12.i2.pp476-481
+<br/> 476
+<br/>Celebrity Face Recognition using Deep Learning
+<br/>1,2,3Faculty of Computer and Mathematical Sciences, Universiti Teknologi MARA (UiTM),
+<br/>4Faculty of Computer and Mathematical Sciences, Universiti Teknologi MARA (UiTM),
+<br/> Shah Alam, Selangor, Malaysia
+<br/>Campus Jasin, Melaka, Malaysia
+<br/>Article Info
+<br/>Article history:
+<br/>Received May 29, 2018
+<br/>Revised Jul 30, 2018
+<br/>Accepted Aug 3, 2018
+<br/>Keywords:
+<br/>AlexNet
+<br/>Convolutional neural network
+<br/>Deep learning
+<br/>Face recognition
+<br/>GoogLeNet
</td></tr><tr><td>1e41a3fdaac9f306c0ef0a978ae050d884d77d2a</td><td>411
<br/>Robust Object Recognition with
<br/>Cortex-Like Mechanisms
@@ -7650,6 +9854,16 @@
<br/>FROM FACE IMAGES
<br/>VALWAY Technology Center, NEC Soft, Ltd., Tokyo, Japan
<br/>Keywords:
+</td></tr><tr><td>1efaa128378f988965841eb3f49d1319a102dc36</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Hierarchical binary CNNs for landmark
+<br/>localization with limited resources
+</td></tr><tr><td>8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2</td><td>Consensual and Privacy-Preserving Sharing of
+<br/>Multi-Subject and Interdependent Data
+<br/>EPFL, UNIL–HEC Lausanne
+<br/>Kévin Huguenin
+<br/>UNIL–HEC Lausanne
+<br/>EPFL
+<br/>EPFL
</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td></td></tr><tr><td>84e4b7469f9c4b6c9e73733fa28788730fd30379</td><td>Duong et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:10
<br/>DOI 10.1186/s13634-017-0521-9
<br/>EURASIP Journal on Advances
@@ -7658,13 +9872,22 @@
<br/>Projective complex matrix factorization for
<br/>facial expression recognition
<br/>Open Access
-</td></tr><tr><td>84dcf04802743d9907b5b3ae28b19cbbacd97981</td><td></td></tr><tr><td>841a5de1d71a0b51957d9be9d9bebed33fb5d9fa</td><td>5017
+</td></tr><tr><td>84dcf04802743d9907b5b3ae28b19cbbacd97981</td><td></td></tr><tr><td>84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1</td><td>Improved Boosting Performance by Explicit
+<br/>Handling of Ambiguous Positive Examples
+</td></tr><tr><td>841a5de1d71a0b51957d9be9d9bebed33fb5d9fa</td><td>5017
<br/>PCANet: A Simple Deep Learning Baseline for
<br/>Image Classification?
+</td></tr><tr><td>849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b</td><td>Detecting Faces Using Region-based Fully
+<br/>Convolutional Networks
+<br/>Tencent AI Lab, China
</td></tr><tr><td>4adca62f888226d3a16654ca499bf2a7d3d11b71</td><td>Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 572–582,
<br/>Sofia, Bulgaria, August 4-9 2013. ©2013 Association for Computational Linguistics
<br/>572
</td></tr><tr><td>4a2d54ea1da851151d43b38652b7ea30cdb6dfb2</td><td>Direct Recognition of Motion Blurred Faces
+</td></tr><tr><td>4a3758f283b7c484d3f164528d73bc8667eb1591</td><td>Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial
+<br/>Networks
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>National Laboratory of Pattern Recognition, CASIA
</td></tr><tr><td>4abd49538d04ea5c7e6d31701b57ea17bc349412</td><td>Recognizing Fine-Grained and Composite Activities
<br/>using Hand-Centric Features and Script Data
</td></tr><tr><td>4a0f98d7dbc31497106d4f652968c708f7da6692</td><td>Real-time Eye Gaze Direction Classification Using
@@ -7680,6 +9903,8 @@
<br/>3D Morphable Shape Model
</td></tr><tr><td>4a6fcf714f663618657effc341ae5961784504c7</td><td>Scaling up Class-Specific Kernel Discriminant
<br/>Analysis for large-scale Face Verification
+</td></tr><tr><td>24115d209e0733e319e39badc5411bbfd82c5133</td><td>Long-term Recurrent Convolutional Networks for
+<br/>Visual Recognition and Description
</td></tr><tr><td>24c442ac3f6802296d71b1a1914b5d44e48b4f29</td><td>Pose and expression-coherent face recovery in the wild
<br/>Technicolor, Cesson-Sévigné, France
<br/>François Le Clerc
@@ -7698,7 +9923,10 @@
<br/>4, rue du Clos Courtel
<br/>35512 Cesson-Sévigné, France
</td></tr><tr><td>244b57cc4a00076efd5f913cc2833138087e1258</td><td>Warped Convolutions: Efficient Invariance to Spatial Transformations
-</td></tr><tr><td>24d376e4d580fb28fd66bc5e7681f1a8db3b6b78</td><td></td></tr><tr><td>24bf94f8090daf9bda56d54e42009067839b20df</td><td></td></tr><tr><td>230527d37421c28b7387c54e203deda64564e1b7</td><td>Person Re-identification: System Design and
+</td></tr><tr><td>24869258fef8f47623b5ef43bd978a525f0af60e</td><td><b>UNIVERSITÉ DE GRENOBLE. No. assigned by the library. THESIS to obtain the degree of Doctor of the Université de Grenoble, speciality: Mathematics and Computer Science, prepared at the Laboratoire Jean Kuntzmann within the École Doctorale Mathématiques, Sciences et Technologies de l’Information, Informatique; presented and publicly defended by Matthieu Guillaumin on 27 September 2010. Exploiting Multimodal Data for Image Understanding (Données multimodales pour l’analyse d’image). Thesis advisors: Cordelia Schmid and Jakob Verbeek. Jury: Éric Gaussier, Université Joseph Fourier, President; Antonio Torralba, Massachusetts Institute of Technology, Reviewer; Tinne Tuytelaars, Katholieke Universiteit Leuven, Reviewer; Mark Everingham, University of Leeds, Examiner; Cordelia Schmid, INRIA Grenoble, Examiner; Jakob Verbeek, INRIA Grenoble, Examiner</b></td></tr><tr><td>24d376e4d580fb28fd66bc5e7681f1a8db3b6b78</td><td></td></tr><tr><td>24ff832171cb774087a614152c21f54589bf7523</td><td>Beat-Event Detection in Action Movie Franchises
+<br/>Jerome Revaud
+<br/>Zaid Harchaoui
+</td></tr><tr><td>24bf94f8090daf9bda56d54e42009067839b20df</td><td></td></tr><tr><td>230527d37421c28b7387c54e203deda64564e1b7</td><td>Person Re-identification: System Design and
<br/>Evaluation Overview
</td></tr><tr><td>23fdbef123bcda0f07d940c72f3b15704fd49a98</td><td></td></tr><tr><td>23ebbbba11c6ca785b0589543bf5675883283a57</td><td></td></tr><tr><td>23172f9a397f13ae1ecb5793efd81b6aba9b4537</td><td>Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 10–17,
<br/>Lisbon, Portugal, 18 September 2015. ©2015 Association for Computational Linguistics.
@@ -7808,7 +10036,9 @@
<br/>Influence of low resolution of images on reliability
<br/>of face detection and recognition
<br/>© The Author(s) 2013. This article is published with open access at SpringerLink.com
-</td></tr><tr><td>4fd29e5f4b7186e349ba34ea30738af7860cf21f</td><td></td></tr><tr><td>4f6adc53798d9da26369bea5a0d91ed5e1314df2</td><td>IEEE TRANSACTIONS ON SIGNAL PROCESSING, VOL. , NO. , 2016
+</td></tr><tr><td>4fd29e5f4b7186e349ba34ea30738af7860cf21f</td><td></td></tr><tr><td>4f051022de100241e5a4ba8a7514db9167eabf6e</td><td>Face Parsing via a Fully-Convolutional Continuous
+<br/>CRF Neural Network
+</td></tr><tr><td>4f6adc53798d9da26369bea5a0d91ed5e1314df2</td><td>IEEE TRANSACTIONS ON SIGNAL PROCESSING, VOL. , NO. , 2016
<br/>Online Nonnegative Matrix Factorization with
<br/>General Divergences
</td></tr><tr><td>4fbef7ce1809d102215453c34bf22b5f9f9aab26</td><td></td></tr><tr><td>4fa0d73b8ba114578744c2ebaf610d2ca9694f45</td><td></td></tr><tr><td>4f591e243a8f38ee3152300bbf42899ac5aae0a5</td><td>SUBMITTED TO TPAMI
@@ -7818,7 +10048,20 @@
<br/>Robotics and Embedded Systems Lab, Department of Computer Science
<br/>Image Understanding and Knowledge-Based Systems, Department of Computer Science
<br/>Technische Universität München, Germany
-</td></tr><tr><td>4f0bf2508ae801aee082b37f684085adf0d06d23</td><td></td></tr><tr><td>8d71872d5877c575a52f71ad445c7e5124a4b174</td><td></td></tr><tr><td>8de06a584955f04f399c10f09f2eed77722f6b1c</td><td>Author manuscript, published in "International Conference on Computer Vision Theory and Applications (VISAPP 2013) (2013)"
+</td></tr><tr><td>4f0bf2508ae801aee082b37f684085adf0d06d23</td><td></td></tr><tr><td>4f4f920eb43399d8d05b42808e45b56bdd36a929</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 123 – No.4, August 2015
+<br/>A Novel Method for 3D Image Segmentation with Fusion
+<br/>of Two Images using Color K-means Algorithm
+<br/>Neelam Kushwah
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>Priusha Narwariya
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>two
+</td></tr><tr><td>8d71872d5877c575a52f71ad445c7e5124a4b174</td><td></td></tr><tr><td>8de06a584955f04f399c10f09f2eed77722f6b1c</td><td>Author manuscript, published in "International Conference on Computer Vision Theory and Applications (VISAPP 2013) (2013)"
</td></tr><tr><td>8d4f0517eae232913bf27f516101a75da3249d15</td><td>ARXIV SUBMISSION, MARCH 2018
<br/>Event-based Dynamic Face Detection and
<br/>Tracking Based on Activity
@@ -8002,6 +10245,8 @@
</td></tr><tr><td>153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4</td><td>Overview of algorithms for face detection and
<br/>tracking
<br/>Nenad Markuš
+</td></tr><tr><td>15136c2f94fd29fc1cb6bedc8c1831b7002930a6</td><td>Deep Learning Architectures for Face
+<br/>Recognition in Video Surveillance
</td></tr><tr><td>153e5cddb79ac31154737b3e025b4fb639b3c9e7</td><td>PREPRINT SUBMITTED TO IEEE TRANSACTIONS ON NEURAL NETWORKS AND LEARNING SYSTEMS
<br/>Active Dictionary Learning in Sparse
<br/>Representation Based Classification
@@ -8015,7 +10260,7 @@
<br/>The OU-ISIR Gait Database comprising the
<br/>Large Population Dataset with Age and
<br/>performance evaluation of age estimation
-</td></tr><tr><td>15f3d47b48a7bcbe877f596cb2cfa76e798c6452</td><td>Automatic face analysis tools for interactive digital games
+</td></tr><tr><td>15aa6c457678e25f6bc0e818e5fc39e42dd8e533</td><td></td></tr><tr><td>15f3d47b48a7bcbe877f596cb2cfa76e798c6452</td><td>Automatic face analysis tools for interactive digital games
<br/>Anonymised for blind review
<br/>Anonymous
<br/>Anonymous
@@ -8026,7 +10271,7 @@
<br/>Technical Report
<br/>TU München
<br/>April 5, 2007
-</td></tr><tr><td>12cb3bf6abf63d190f849880b1703ccc183692fe</td><td>Guess Who?: A game to crowdsource the labeling of affective facial
+</td></tr><tr><td>1287bfe73e381cc8042ac0cc27868ae086e1ce3b</td><td></td></tr><tr><td>12cb3bf6abf63d190f849880b1703ccc183692fe</td><td>Guess Who?: A game to crowdsource the labeling of affective facial
<br/>expressions is comparable to expert ratings.
<br/>Graduation research project, june 2012
<br/>Supervised by: Dr. Joost Broekens
@@ -8037,6 +10282,27 @@
<br/>Multiview Facial Landmark Localization in RGB-D
<br/>Images via Hierarchical Regression
<br/>With Binary Patterns
+</td></tr><tr><td>120785f9b4952734818245cc305148676563a99b</td><td>Diagnostic automatique de l’état dépressif
+<br/>S. Cholet
+<br/>H. Paugam-Moisy
+<br/>Laboratoire de Mathématiques Informatique et Applications (LAMIA - EA 4540)
+<br/>Université des Antilles, Campus de Fouillole - Guadeloupe
+<br/>Abstract
+<br/>Psychosocial disorders are a major public health
+<br/>problem, with potentially serious short- and
+<br/>long-term consequences at the professional,
+<br/>personal, and family levels. The diagnosis of these
+<br/>disorders must be established by a professional.
+<br/>However, AI (Artificial Intelligence) can contribute
+<br/>by providing the practitioner with diagnostic
+<br/>support, and the patient with fast, low-cost,
+<br/>continuous monitoring. We propose an approach
+<br/>toward a method for automatic diagnosis of the
+<br/>depressive state from real-time observations of the
+<br/>face, using a simple webcam. From videos of the
+<br/>AVEC’2014 challenge, we trained a neural classifier
+<br/>to extract face prototypes according to different
+<br/>values of the Beck depression score (BDI-II).
</td></tr><tr><td>12c713166c46ac87f452e0ae383d04fb44fe4eb2</td><td></td></tr><tr><td>12150d8b51a2158e574e006d4fbdd3f3d01edc93</td><td>Deep End2End Voxel2Voxel Prediction
<br/>Presented by: Ahmed Osman
<br/>Ahmed Osman
@@ -8051,6 +10317,9 @@
<br/>Tom 53(67), Fascicola 1-2, 2008
<br/>Facial Expression Recognition under Noisy Environment
<br/>Using Gabor Filters
+</td></tr><tr><td>8ce9b7b52d05701d5ef4a573095db66ce60a7e1c</td><td>Structured Sparse Subspace Clustering: A Joint
+<br/>Affinity Learning and Subspace Clustering
+<br/>Framework
</td></tr><tr><td>8c6c0783d90e4591a407a239bf6684960b72f34e</td><td>SESSION
<br/>KNOWLEDGE ENGINEERING AND
<br/>MANAGEMENT + KNOWLEDGE ACQUISITION
@@ -8082,6 +10351,9 @@
<br/>GREYC, CNRS UMR 6072, ENSICAEN
<br/>Université de Caen Basse-Normandie
<br/>France
+</td></tr><tr><td>1d776bfe627f1a051099997114ba04678c45f0f5</td><td>Deployment of Customized Deep Learning based
+<br/>Video Analytics On Surveillance Cameras
+<br/>AitoeLabs (www.aitoelabs.com)
</td></tr><tr><td>1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb</td><td></td></tr><tr><td>1de8f38c35f14a27831130060810cf9471a62b45</td><td>Int J Comput Vis
<br/>DOI 10.1007/s11263-017-0989-7
<br/>A Branch-and-Bound Framework for Unsupervised Common
@@ -8098,6 +10370,9 @@
<br/>Kuntzmann,
<br/>655 avenue de l'Europe, Montbonnot 38330, France
</td></tr><tr><td>71b376dbfa43a62d19ae614c87dd0b5f1312c966</td><td>The Temporal Connection Between Smiles and Blinks
+</td></tr><tr><td>714d487571ca0d676bad75c8fa622d6f50df953b</td><td>eBear: An Expressive Bear-Like Robot
+</td></tr><tr><td>710011644006c18291ad512456b7580095d628a2</td><td>Learning Residual Images for Face Attribute Manipulation
+<br/>Fujitsu Research & Development Center, Beijing, China.
</td></tr><tr><td>76fd801981fd69ff1b18319c450cb80c4bc78959</td><td>Proceedings of the 11th International Conference on Computational Semantics, pages 76–81,
<br/>London, UK, April 15-17 2015. c(cid:13)2015 Association for Computational Linguistics
<br/>76
@@ -8192,6 +10467,32 @@
<br/>from a fixed amount of training data. Unless a lot
<br/>IEEE A&E SYSTEMS MAGAZINE VOL. 19, NO. 1 JANUARY 2004 PART 2: TUTORIALS-BAGGENSTOSS
<br/>37
+</td></tr><tr><td>766728bac030b169fcbc2fbafe24c6e22a58ef3c</td><td>A survey of deep facial landmark detection
+<br/>Yongzhe Yan1,2
+<br/>Thierry Chateau1
+<br/>1 Université Clermont Auvergne, France
+<br/>2 Wisimage, France
+<br/>3 Université de Lyon, CNRS, INSA Lyon, LIRIS, UMR5205, Lyon, France
+<br/>Abstract
+<br/>Landmark detection plays a crucial role in many
+<br/>face analysis applications such as identity and
+<br/>expression recognition, avatar animation, and 3D
+<br/>face reconstruction, as well as in augmented reality
+<br/>applications such as virtual masks or virtual
+<br/>make-up. The advent of deep learning has enabled
+<br/>very significant progress in this field, including
+<br/>on unconstrained (in-the-wild) corpora. We present
+<br/>here a state of the art focused on 2D detection in
+<br/>a still image, and the methods specific to video.
+<br/>We then present the existing corpora for these
+<br/>three tasks, as well as the associated evaluation
+<br/>metrics. Finally, we report some results and some
+<br/>directions for research.
+<br/>Keywords
+<br/>Facial landmark detection, Face alignment, Deep
+<br/>learning
+</td></tr><tr><td>7697295ee6fc817296bed816ac5cae97644c2d5b</td><td>Detecting and Recognizing Human-Object Interactions
+<br/>Facebook AI Research (FAIR)
</td></tr><tr><td>1c80bc91c74d4984e6422e7b0856cf3cf28df1fb</td><td>Noname manuscript No.
<br/>(will be inserted by the editor)
<br/>Hierarchical Adaptive Structural SVM for Domain Adaptation
@@ -8296,7 +10597,9 @@
<br/>Hybrid networks are particularly adapted to our client-
</td></tr><tr><td>1c3073b57000f9b6dbf1c5681c52d17c55d60fd7</td><td>THESIS submitted for the degree of Doctor of the École Nationale des Ponts et Chaussées, speciality: Computer Science, by Charlotte GHYS. Analyse, Reconstruction 3D, & Animation du Visage (Analysis, 3D Reconstruction, & Animation of Faces). Defended on 19 May 2010 before a jury composed of: Reviewers: Maja PANTIC, Dimitris SAMARAS; Examiners: Michel BARLAUD, Renaud KERIVEN; Thesis supervision: Nikos PARAGIOS, Bénédicte BASCLE </td></tr><tr><td>1c93b48abdd3ef1021599095a1a5ab5e0e020dd5</td><td>JOURNAL OF LATEX CLASS FILES, VOL. *, NO. *, JANUARY 2009
<br/>A Compositional and Dynamic Model for Face Aging
-</td></tr><tr><td>1c6be6874e150898d9db984dd546e9e85c85724e</td><td></td></tr><tr><td>1c65f3b3c70e1ea89114f955624d7adab620a013</td><td></td></tr><tr><td>82bef8481207de9970c4dc8b1d0e17dced706352</td><td></td></tr><tr><td>82d2af2ffa106160a183371946e466021876870d</td><td>A Novel Space-Time Representation on the Positive Semidefinite Cone
+</td></tr><tr><td>1c6be6874e150898d9db984dd546e9e85c85724e</td><td></td></tr><tr><td>1c65f3b3c70e1ea89114f955624d7adab620a013</td><td></td></tr><tr><td>1c6e22516ceb5c97c3caf07a9bd5df357988ceda</td><td></td></tr><tr><td>82bef8481207de9970c4dc8b1d0e17dced706352</td><td></td></tr><tr><td>825f56ff489cdd3bcc41e76426d0070754eab1a8</td><td>Making Convolutional Networks Recurrent for Visual Sequence Learning
+<br/>NVIDIA
+</td></tr><tr><td>82d2af2ffa106160a183371946e466021876870d</td><td>A Novel Space-Time Representation on the Positive Semidefinite Cone
<br/>for Facial Expression Recognition
<br/>1IMT Lille Douai, Univ. Lille, CNRS, UMR 9189 – CRIStAL –
<br/>Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
@@ -8340,9 +10643,16 @@
<br/>Journal Computer Vision, Vol. 25, No. 1, pp. 23-48, 1997.
<br/>10.
<br/>Recognition using a State-Based Model of Spatially-Localized Facial
+</td></tr><tr><td>82417d8ec8ac6406f2d55774a35af2a1b3f4b66e</td><td>Some faces are more equal than others:
+<br/>Hierarchical organization for accurate and
+<br/>efficient large-scale identity-based face retrieval
+<br/>GREYC, CNRS UMR 6072, Université de Caen Basse-Normandie, France1
+<br/>Technicolor, Rennes, France2
</td></tr><tr><td>826c66bd182b54fea3617192a242de1e4f16d020</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
<br/>1602
<br/>ICASSP 2017
+</td></tr><tr><td>4972aadcce369a8c0029e6dc2f288dfd0241e144</td><td>Multi-target Unsupervised Domain Adaptation
+<br/>without Exactly Shared Categories
</td></tr><tr><td>49dd4b359f8014e85ed7c106e7848049f852a304</td><td></td></tr><tr><td>49e85869fa2cbb31e2fd761951d0cdfa741d95f3</td><td>253
<br/>Adaptive Manifold Learning
</td></tr><tr><td>49659fb64b1d47fdd569e41a8a6da6aa76612903</td><td></td></tr><tr><td>49a7949fabcdf01bbae1c2eb38946ee99f491857</td><td>A CONCATENATING FRAMEWORK OF SHORTCUT
@@ -8352,6 +10662,10 @@
<br/>Support Vector Machine for age classification
<br/>1Assistant Professor, CSE, RSR RCET, Kohka Bhilai
<br/>2,3 Sr. Assistant Professor, CSE, SSCET, Junwani Bhilai
+</td></tr><tr><td>49df381ea2a1e7f4059346311f1f9f45dd997164</td><td>2018
+<br/>On the Use of Client-Specific Information for Face
+<br/>Presentation Attack Detection Based on Anomaly
+<br/>Detection
</td></tr><tr><td>40205181ed1406a6f101c5e38c5b4b9b583d06bc</td><td>Using Context to Recognize People in Consumer Images
</td></tr><tr><td>40dab43abef32deaf875c2652133ea1e2c089223</td><td>Noname manuscript No.
<br/>(will be inserted by the editor)
@@ -8405,6 +10719,11 @@
<br/>2 Environment Perception, Group Research, Daimler AG, Ulm, Germany
<br/>3 Intelligent Systems Lab, Faculty of Science, Univ. of Amsterdam, The Netherlands
</td></tr><tr><td>40cd062438c280c76110e7a3a0b2cf5ef675052c</td><td></td></tr><tr><td>40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a</td><td>80
+</td></tr><tr><td>40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd</td><td>Bridging Heterogeneous Domains With Parallel Transport For Vision and
+<br/>Multimedia Applications
+<br/>Dept. of Video and Multimedia Technologies Research
+<br/>AT&T Labs-Research
+<br/>San Francisco, CA 94108
</td></tr><tr><td>40389b941a6901c190fb74e95dc170166fd7639d</td><td>Automatic Facial Expression Recognition
<br/>Emotient
<br/>http://emotient.com
@@ -8433,6 +10752,9 @@
<br/>detectors embedded in digital cameras [62]. Nonetheless, considerable progress has yet to be
<br/>made: Methods for face detection and tracking (the first step of automated face analysis)
<br/>work well for frontal views of adult Caucasian and Asian faces [50], but their performance
+</td></tr><tr><td>40273657e6919455373455bd9a5355bb46a7d614</td><td>Anonymizing k-Facial Attributes via Adversarial Perturbations
+<br/>1 IIIT Delhi, New Delhi, India
+<br/>2 Ministry of Electronics and Information Technology, New Delhi, India
</td></tr><tr><td>40b10e330a5511a6a45f42c8b86da222504c717f</td><td>Implementing the Viola-Jones
<br/>Face Detection Algorithm
<br/>Kongens Lyngby 2008
@@ -8448,7 +10770,11 @@
</td></tr><tr><td>401e6b9ada571603b67377b336786801f5b54eee</td><td>Active Image Clustering: Seeking Constraints from
<br/>Humans to Complement Algorithms
<br/>November 22, 2011
-</td></tr><tr><td>2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9</td><td></td></tr><tr><td>2eb37a3f362cffdcf5882a94a20a1212dfed25d9</td><td>4
+</td></tr><tr><td>2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9</td><td></td></tr><tr><td>2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Pointly-Supervised Action Localization
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>2eb37a3f362cffdcf5882a94a20a1212dfed25d9</td><td>4
<br/>Local Feature Based Face Recognition
<br/>R.I.T., Rajaramnagar and S.G.G.S. COE &T, Nanded
<br/>India
@@ -8485,10 +10811,13 @@
<br/>region as an input to a face recognition system and constructs a lower dimensional subspace
<br/>using principal component analysis (PCA) (Turk & Pentland, 1991), linear discriminant
<br/>www.intechopen.com
-</td></tr><tr><td>2e0e056ed5927a4dc6e5c633715beb762628aeb0</td><td></td></tr><tr><td>2e68190ebda2db8fb690e378fa213319ca915cf8</td><td>Generating Videos with Scene Dynamics
+</td></tr><tr><td>2e5cfa97f3ecc10ae8f54c1862433285281e6a7c</td><td></td></tr><tr><td>2e0e056ed5927a4dc6e5c633715beb762628aeb0</td><td></td></tr><tr><td>2e68190ebda2db8fb690e378fa213319ca915cf8</td><td>Generating Videos with Scene Dynamics
<br/>MIT
<br/>UMBC
<br/>MIT
+</td></tr><tr><td>2e0d56794379c436b2d1be63e71a215dd67eb2ca</td><td>Improving precision and recall of face recognition in SIPP with combination of
+<br/>modified mean search and LSH
+<br/>Xihua.Li
</td></tr><tr><td>2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd</td><td></td></tr><tr><td>2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522</td><td>Leveraging Billions of Faces to Overcome
<br/>Performance Barriers in Unconstrained Face
<br/>Recognition
@@ -8636,7 +10965,7 @@
</td></tr><tr><td>47f8b3b3f249830b6e17888df4810f3d189daac1</td><td></td></tr><tr><td>47e8db3d9adb79a87c8c02b88f432f911eb45dc5</td><td>MAGMA: Multi-level accelerated gradient mirror descent algorithm for
<br/>large-scale convex composite minimization
<br/>July 15, 2016
-</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td></td></tr><tr><td>477811ff147f99b21e3c28309abff1304106dbbe</td><td></td></tr><tr><td>78a4cabf0afc94da123e299df5b32550cd638939</td><td></td></tr><tr><td>78f08cc9f845dc112f892a67e279a8366663e26d</td><td>TECHNISCHE UNIVERSITÄT MÜNCHEN
+</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td></td></tr><tr><td>477811ff147f99b21e3c28309abff1304106dbbe</td><td></td></tr><tr><td>47e14fdc6685f0b3800f709c32e005068dfc8d47</td><td></td></tr><tr><td>782188821963304fb78791e01665590f0cd869e8</td><td></td></tr><tr><td>78a4cabf0afc94da123e299df5b32550cd638939</td><td></td></tr><tr><td>78f08cc9f845dc112f892a67e279a8366663e26d</td><td>TECHNISCHE UNIVERSITÄT MÜNCHEN
<br/>Lehrstuhl für Mensch-Maschine-Kommunikation
<br/>Semi-Autonomous Data Enrichment and
<br/>Optimisation for Intelligent Speech Analysis
@@ -8732,7 +11061,10 @@
</td></tr><tr><td>78fdf2b98cf6380623b0e20b0005a452e736181e</td><td></td></tr><tr><td>788a7b59ea72e23ef4f86dc9abb4450efefeca41</td><td></td></tr><tr><td>8b7191a2b8ab3ba97423b979da6ffc39cb53f46b</td><td>Search Pruning in Video Surveillance Systems: Efficiency-Reliability Tradeoff
<br/>EURECOM
<br/>Sophia Antipolis, France
-</td></tr><tr><td>8b8728edc536020bc4871dc66b26a191f6658f7c</td><td></td></tr><tr><td>8bf647fed40bdc9e35560021636dfb892a46720e</td><td>Learning to Hash-tag Videos with Tag2Vec
+</td></tr><tr><td>8b8728edc536020bc4871dc66b26a191f6658f7c</td><td></td></tr><tr><td>8b744786137cf6be766778344d9f13abf4ec0683</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2697
+<br/>ICASSP 2016
+</td></tr><tr><td>8bf647fed40bdc9e35560021636dfb892a46720e</td><td>Learning to Hash-tag Videos with Tag2Vec
<br/>CVIT, KCIS, IIIT Hyderabad, India
<br/>P J Narayanan
<br/>http://cvit.iiit.ac.in/research/projects/tag2vec
@@ -8878,6 +11210,17 @@
<br/>Sparse Output Coding for Scalable Visual Recognition
<br/>Received: 15 May 2013 / Accepted: 16 June 2015 / Published online: 26 June 2015
<br/>© Springer Science+Business Media New York 2015
+</td></tr><tr><td>7f4bc8883c3b9872408cc391bcd294017848d0cf</td><td>Computer Sciences Department
+<br/>The Multimodal Focused Attribute Model: A Nonparametric
+<br/>Bayesian Approach to Simultaneous Object Classification and
+<br/>Attribute Discovery
+<br/>Technical Report #1697
+<br/>January 2012
</td></tr><tr><td>7f6061c83dc36633911e4d726a497cdc1f31e58a</td><td>YouTube-8M: A Large-Scale Video Classification
<br/>Benchmark
<br/>Paul Natsev
@@ -8940,12 +11283,18 @@
<br/>way aia
<br/>  ea whi
<br/>de deve
+</td></tr><tr><td>7a81967598c2c0b3b3771c1af943efb1defd4482</td><td>Do We Need More Training Data?
</td></tr><tr><td>7ad77b6e727795a12fdacd1f328f4f904471233f</td><td>Supervised Local Descriptor Learning
<br/>for Human Action Recognition
-</td></tr><tr><td>7aa4c16a8e1481629f16167dea313fe9256abb42</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+</td></tr><tr><td>7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b</td><td></td></tr><tr><td>7aa4c16a8e1481629f16167dea313fe9256abb42</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
<br/>2981
<br/>ICASSP 2017
</td></tr><tr><td>7a85b3ab0efb6b6fcb034ce13145156ee9d10598</td><td></td></tr><tr><td>7ab930146f4b5946ec59459f8473c700bcc89233</td><td></td></tr><tr><td>7ad7897740e701eae455457ea74ac10f8b307bed</td><td>Random Subspace Two-dimensional LDA for Face Recognition*
+</td></tr><tr><td>7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697</td><td>Attend and Rectify: a Gated Attention
+<br/>Mechanism for Fine-Grained Recovery
+<br/>†Computer Vision Center and Universitat Autònoma de Barcelona (UAB),
+<br/>Campus UAB, 08193 Bellaterra, Catalonia Spain
+<br/>‡Visual Tagging Services, Parc de Recerca, Campus UAB
</td></tr><tr><td>1451e7b11e66c86104f9391b80d9fb422fb11c01</td><td>IET Signal Processing
<br/>Research Article
<br/>Image privacy protection with secure JPEG
@@ -9132,6 +11481,14 @@
</td></tr><tr><td>14a5feadd4209d21fa308e7a942967ea7c13b7b6</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
<br/>1025
<br/>ICASSP 2012
+</td></tr><tr><td>14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b</td><td>Modeling Spatio-Temporal Human Track Structure for Action
+<br/>Localization
+</td></tr><tr><td>14ee4948be56caeb30aa3b94968ce663e7496ce4</td><td>Jang, Y; Gunes, H; Patras, I
+<br/>© Copyright 2018 IEEE
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/36405
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
</td></tr><tr><td>8ee62f7d59aa949b4a943453824e03f4ce19e500</td><td>Robust Head-Pose Estimation Based on
<br/>Partially-Latent Mixture of Linear Regression
<br/>∗INRIA Grenoble Rhône-Alpes, Montbonnot Saint-Martin, France
@@ -9171,6 +11528,7 @@
<br/>[11]. This could indicate that image manipulations tend to equalize face recognition abilities, and
<br/>we investigate whether this is the case with the manipulations and face recognition algorithms we
<br/>test.
+</td></tr><tr><td>8e3d0b401dec8818cd0245c540c6bc032f169a1d</td><td>McGan: Mean and Covariance Feature Matching GAN
</td></tr><tr><td>8e94ed0d7606408a0833e69c3185d6dcbe22bbbe</td><td>© 2012 IEEE. Personal use of this material is permitted. Permission from IEEE
<br/>must be obtained for all other uses, in any current or future media, including
<br/>reprinting/republishing this material for advertising or promotional purposes,
@@ -9186,7 +11544,7 @@
<br/>Institut Eurécom
<br/>Multimedia Communications Department
<br/>BP 193, 06904 Sophia Antipolis Cedex, France
-</td></tr><tr><td>8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125</td><td>© 2012 IEEE. Personal use of this material is permitted.
+</td></tr><tr><td>8ed32c8fad924736ebc6d99c5c319312ba1fa80b</td><td></td></tr><tr><td>8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125</td><td>© 2012 IEEE. Personal use of this material is permitted.
<br/>Permission from IEEE must be obtained
<br/>for all other uses, in any current or
<br/>future media,
@@ -9217,6 +11575,15 @@
<br/>accepted on 18.09.2008.
</td></tr><tr><td>8ed051be31309a71b75e584bc812b71a0344a019</td><td>Class-based feature matching across unrestricted
<br/>transformations
+</td></tr><tr><td>8e36100cb144685c26e46ad034c524b830b8b2f2</td><td>Modeling Facial Geometry using Compositional VAEs
+<br/>1École Polytechnique Fédérale de Lausanne
+<br/>2Facebook Reality Labs, Pittsburgh
+</td></tr><tr><td>8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Learning from Longitudinal Face Demonstration -
+<br/>Where Tractable Deep Modeling Meets Inverse Reinforcement Learning
+<br/>Savvides · Tien D. Bui
+<br/>Received: date / Accepted: date
</td></tr><tr><td>225fb9181545f8750061c7693661b62d715dc542</td><td></td></tr><tr><td>22043cbd2b70cb8195d8d0500460ddc00ddb1a62</td><td>Separability-Oriented Subclass Discriminant
<br/>Analysis
</td></tr><tr><td>22137ce9c01a8fdebf92ef35407a5a5d18730dde</td><td></td></tr><tr><td>22dada4a7ba85625824489375184ba1c3f7f0c8f</td><td></td></tr><tr><td>223ec77652c268b98c298327d42aacea8f3ce23f</td><td>TR-CS-11-02
@@ -9226,6 +11593,14 @@
<br/>ANU Computer Science Technical Report Series
</td></tr><tr><td>227b18fab568472bf14f9665cedfb95ed33e5fce</td><td>Compositional Dictionaries for Domain Adaptive
<br/>Face Recognition
+</td></tr><tr><td>227b1a09b942eaf130d1d84cdcabf98921780a22</td><td>Yang et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:51
+<br/>https://doi.org/10.1186/s13634-018-0572-6
+<br/>EURASIP Journal on Advances
+<br/>in Signal Processing
+<br/>RESEARCH
+<br/>Multi-feature shape regression for face
+<br/>alignment
+<br/>Open Access
</td></tr><tr><td>22dabd4f092e7f3bdaf352edd925ecc59821e168</td><td> Deakin Research Online
<br/>This is the published version:
<br/>An, Senjian, Liu, Wanquan and Venkatesh, Svetha 2008, Exploiting side information in
@@ -9284,7 +11659,7 @@
<br/>2016
</td></tr><tr><td>25d3e122fec578a14226dc7c007fb1f05ddf97f7</td><td>The First Facial Expression Recognition and Analysis Challenge
</td></tr><tr><td>2597b0dccdf3d89eaffd32e202570b1fbbedd1d6</td><td>Towards predicting the likeability of fashion images
-</td></tr><tr><td>25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8</td><td>Label Distribution Learning
+</td></tr><tr><td>25982e2bef817ebde7be5bb80b22a9864b979fb0</td><td></td></tr><tr><td>25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8</td><td>Label Distribution Learning
</td></tr><tr><td>2559b15f8d4a57694a0a33bdc4ac95c479a3c79a</td><td>570
<br/>Contextual Object Localization With Multiple
<br/>Kernel Nearest Neighbor